gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by Exopy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Test the capabilities of the database access editor model.
"""
import pytest
import enaml
from enaml.widgets.api import FlowArea, Menu
from exopy.utils.container_change import ContainerChange
from exopy.tasks.api import RootTask, ComplexTask, SimpleTask
from exopy.measurement.editors.api import Editor
from exopy.measurement.editors.database_access_editor.editor_model import\
EditorModel
from exopy.testing.util import wait_for_window_displayed
with enaml.imports():
from exopy.measurement.editors.database_access_editor import\
DatabaseAccessEditor
from exopy.testing.windows import PageTestingWindow
@pytest.fixture
def task():
    """Task hierarchy used to test the editor.

    Structure (database entries in parentheses):

    Root:
        SimpleTask: simp1 (t)
        ComplexTask: comp1 (t1, t2)
            SimpleTask: simp2 (t)
            ComplexTask: comp2 (t1, t2)
                SimpleTask: simp3 (t)

    """
    r = RootTask()
    r.add_child_task(0, SimpleTask(name='simp1', database_entries={'t': 1}))
    c = ComplexTask(name='comp1', database_entries={'t1': 2, 't2': 'r'})
    c.add_child_task(0,
                     SimpleTask(name='simp2', database_entries={'t': 1}))
    c2 = ComplexTask(name='comp2', database_entries={'t1': 2, 't2': 'r'})
    c2.add_child_task(0,
                      SimpleTask(name='simp3', database_entries={'t': 1}))
    # comp2 is nested inside comp1, which is attached to the root.
    c.add_child_task(1, c2)
    r.add_child_task(1, c)
    return r
def test_node_creation(task):
    """Check that pre-existing access exceptions are picked up when the
    editor model is first built.

    """
    # Give the deepest simple task an access exception before the editor
    # model exists.
    deepest = task.children[1].children[1].children[0]
    deepest.add_access_exception('t', 1)

    model = EditorModel(root=task)
    assert model.nodes[deepest.path].has_exceptions
def test_node_sorting(task):
    """Ensure a node model orders its children and follows task moves.

    """
    model = EditorModel(root=task)
    root_node = model.nodes['root']

    # Insert a new complex task first and trigger a manual re-sort.
    task.add_child_task(0, ComplexTask(name='cc'))
    root_node.sort_nodes()
    assert [child.task.name for child in root_node.children] == ['cc', 'comp1']
    expected = ['default_path', 'simp1_t', 'comp1_t1', 'comp1_t2']
    assert sorted(root_node.entries) == sorted(expected)

    # Moving a child task should re-order the node children automatically.
    task.move_child_task(0, 2)
    assert [child.task.name for child in root_node.children] == ['comp1', 'cc']
    sub_entries = sorted(root_node.children[0].entries)
    assert sub_entries == sorted(['simp2_t', 'comp2_t1', 'comp2_t2'])

    # Exercise the collapsed-changes code path (coverage only).
    change = ContainerChange(collapsed=[ContainerChange()])
    root_node._react_to_task_children_event(change)
def test_editor_modifying_exception_level(task):
    """Test modifying the level of an access exception.

    Exceptions are promoted/demoted through the editor model and should
    appear on, or vanish from, the matching nodes.

    """
    ed = EditorModel(root=task)
    rnode = ed.nodes['root']
    parent_node = rnode.children[0]
    node = parent_node.children[0]
    # Check that we can desambiguate between task with same prefix
    node.task.add_child_task(0, SimpleTask(name='simp3_t',
                                           database_entries={'t': 1}))
    node.add_exception('simp3_t')
    # The exception is reported one level up, on the parent node.
    assert 'simp3_t' in parent_node.exceptions
    assert 't' in node.task.children[1].access_exs
    # Promote the exception to the root level.
    ed.increase_exc_level('root/comp1', 'simp3_t')
    assert 'simp3_t' not in parent_node.exceptions
    assert 'simp3_t' in rnode.exceptions
    # Demote it back to the intermediate level.
    ed.decrease_exc_level('root', 'simp3_t')
    assert 'simp3_t' in parent_node.exceptions
    assert 'simp3_t' not in rnode.exceptions
    # Demoting below the defining node removes the exception entirely.
    ed.decrease_exc_level('root/comp1', 'simp3_t')
    assert 'simp3_t' not in parent_node.exceptions
    assert 't' not in node.task.children[1].access_exs
    # An exception added on an intermediate node shows up on the root node.
    parent_node.add_exception('simp2_t')
    assert 'simp2_t' in rnode.exceptions
def test_editor_changing_root(task):
    """Check that replacing the root rebuilds the whole node hierarchy.

    """
    model = EditorModel(root=RootTask())
    assert len(model.nodes) == 1

    model.root = task
    assert len(model.nodes) == 3
    for path in ('root', 'root/comp1', 'root/comp1/comp2'):
        assert path in model.nodes
    # Child nodes are attached to their parent node.
    assert model.nodes['root/comp1'] in model.nodes['root'].children
    assert model.nodes['root/comp1/comp2'] in model.nodes['root/comp1'].children
def test_handling_entry_modification(task):
    """Test handling the possible modifications at the entry level.

    """
    ed = EditorModel(root=task)
    # Adding a database entry to a task adds it to the node entries.
    child = task.children[1].children[0]
    entries = child.database_entries.copy()
    entries['t2'] = 1
    child.database_entries = entries
    assert 'simp2_t2' in ed.nodes['root/comp1'].entries
    # Renaming a task renames all of its entries in the node.
    child = task.children[1].children[1]
    child.name = 'cc'
    assert 'cc_t1' in ed.nodes['root/comp1'].entries
    assert 'cc_t2' in ed.nodes['root/comp1'].entries
    assert 'comp2_t1' not in ed.nodes['root/comp1'].entries
    assert 'comp2_t2' not in ed.nodes['root/comp1'].entries
    # Removing the database entry backing an access exception removes
    # both the entry and the exception.
    child = task.children[1].children[1].children[0]
    child.add_access_exception('t', 2)
    assert 'simp3_t' in ed.nodes['root'].exceptions
    child.database_entries = {}
    assert not ed.nodes['root/comp1/cc'].entries
    # NOTE(review): 'simp3_t' may be the intended key here (that is the
    # exception added above); 'simp2_t' is trivially absent -- confirm.
    assert 'simp2_t' not in ed.nodes['root'].exceptions
def test_handling_exceptions_modifications(task):
    """Test handling the possible modifications at the level of an exception.

    """
    ed = EditorModel(root=task)
    child = task.children[1].children[1].children[0]
    child.add_access_exception('t', 1)
    # The exception is listed one level up and flagged on its origin node.
    assert 'simp3_t' in ed.nodes['root/comp1'].exceptions
    assert 'simp3_t' in ed.nodes['root/comp1/comp2'].has_exceptions
    # Renaming the task renames the exception accordingly.
    child.name = 'ss'
    assert 'ss_t' in ed.nodes['root/comp1'].exceptions
    assert 'ss_t' in ed.nodes['root/comp1/comp2'].has_exceptions
    # Renaming an ancestor updates the node paths.
    parent = task.children[1]
    parent.name = 'cc'
    assert 'ss_t' in ed.nodes['root/cc'].exceptions
    assert 'ss_t' in ed.nodes['root/cc/comp2'].has_exceptions
    # Removing the access exception removes it from the nodes.
    child.remove_access_exception('t')
    assert 'ss_t' not in ed.nodes['root/cc'].exceptions
    assert 'ss_t' not in ed.nodes['root/cc/comp2'].has_exceptions
    # For coverage try removing all exceptions.
    task.database.remove_access_exception('root/cc')
def test_handling_node_manipulation(task):
    """Check that adding/removing a complex task creates/deletes its node.

    """
    model = EditorModel(root=task)
    new_task = ComplexTask(name='cc')
    task.add_child_task(0, new_task)
    assert 'root/cc' in model.nodes
    assert model.nodes['root'].children[0].task is new_task

    task.remove_child_task(0)
    assert 'root/cc' not in model.nodes

    # For coverage check that we could handle a list of changes
    model._react_to_nodes([('', '', '')])

    # Test failing to find a task by path
    with pytest.raises(ValueError):
        model._get_task('root/unknown')
def test_editor_widget(exopy_qtbot, task, dialog_sleep):
    """That the interaction with the editor widget makes sense.

    Drives the real editor widget: toggling the children visibility,
    adding an access exception, moving it up/down the hierarchy and
    finally removing it, then destroying a node.

    """
    dialog_sleep = dialog_sleep or 1

    def get_task_widget(editor):
        # Widget displaying the currently selected task.
        return editor.page_widget().widgets()[0].scroll_widget()

    def get_flow_area(widget):
        return [w for w in widget.children if isinstance(w, FlowArea)][0]

    def get_menu(task_widget, widget_index):
        # Contextual menu attached to the flow item at widget_index.
        flow_area = task_widget.widgets()[0]
        flow_item = flow_area.flow_items()[widget_index]
        menu = flow_item.flow_widget().widgets()[0].children[0]
        return menu

    # Deepest simple task, on which access exceptions will be created.
    task_with_exs = task.children[1].children[1].children[0]
    editor = DatabaseAccessEditor(declaration=Editor(id='exopy.database'),
                                  selected_task=task)
    window = PageTestingWindow(widget=editor)
    window.show()
    wait_for_window_displayed(exopy_qtbot, window)
    exopy_qtbot.wait(dialog_sleep)
    r_widget = get_task_widget(editor)
    flow_area = get_flow_area(r_widget)
    # Check that there is no contextual menu attached.
    assert not [w for w in flow_area.flow_items()[0].flow_widget().children
                if isinstance(w, Menu)]
    # Ask the editor to hide its children by clicking the button (this does
    # not check that the layout actually changed simply that is is correct)
    r_widget.widgets()[-2].clicked = True
    assert r_widget.widgets()[-1].visible is False
    assert not r_widget.widgets()[-1].layout_constraints()
    # Undo
    r_widget.widgets()[-2].clicked = True
    assert r_widget.widgets()[-1].visible is True
    assert r_widget.widgets()[-1].layout_constraints()
    # Add an access exception to the lowest level.
    editor.selected_task = task.children[1].children[1]
    exopy_qtbot.wait(10 + dialog_sleep)
    widget = get_task_widget(editor)
    add_ex_action = get_menu(widget, 0).items()[0]
    add_ex_action.triggered = True

    def assert_access_exs():
        assert task_with_exs.access_exs['t'] == 1
    exopy_qtbot.wait_until(assert_access_exs)
    exopy_qtbot.wait(dialog_sleep)
    # Move the exception up
    editor.selected_task = task.children[1]

    def assert_flows():
        assert len(flow_area.flow_items()) == 4
    exopy_qtbot.wait_until(assert_flows)
    exopy_qtbot.wait(dialog_sleep)
    widget = get_task_widget(editor)
    flow_area = get_flow_area(widget)
    menu = get_menu(widget, -1)
    assert len(menu.items()) == 2  # Check that both actions are there.
    move_up_action = menu.items()[0]
    move_up_action.triggered = True

    def assert_access_exs():
        assert task_with_exs.access_exs['t'] == 2
    exopy_qtbot.wait_until(assert_access_exs)
    exopy_qtbot.wait(dialog_sleep)
    # Move the exception down
    editor.selected_task = task

    def assert_flows():
        assert len(flow_area.flow_items()) == 3
    exopy_qtbot.wait_until(assert_flows)
    exopy_qtbot.wait(dialog_sleep)
    widget = get_task_widget(editor)
    flow_area = get_flow_area(widget)
    menu = get_menu(widget, -1)
    assert len(menu.items()) == 1  # Check that only one action is there.
    move_down_action = menu.items()[0]
    move_down_action.triggered = True

    def assert_access_exs():
        assert task_with_exs.access_exs['t'] == 1
    exopy_qtbot.wait_until(assert_access_exs)
    exopy_qtbot.wait(dialog_sleep)
    # Move the exception down (it disappears)
    editor.selected_task = task.children[1]
    exopy_qtbot.wait(10 + dialog_sleep)
    widget = get_task_widget(editor)
    flow_area = get_flow_area(widget)
    assert len(flow_area.flow_items()) == 4
    menu = get_menu(widget, -1)
    move_down_action = menu.items()[1]
    move_down_action.triggered = True

    def assert_access_exs():
        assert not task_with_exs.access_exs
    exopy_qtbot.wait_until(assert_access_exs)
    exopy_qtbot.wait(dialog_sleep)
    # Destroy a task such that it leads to the destruction of a node
    editor.selected_task = task
    old_cache = editor._cache.copy()
    task.remove_child_task(1)

    def assert_cache():
        assert len(editor._cache) == 1
    exopy_qtbot.wait_until(assert_cache)
    for node in old_cache:
        editor.discard_view(node)
    exopy_qtbot.wait(dialog_sleep)
| |
# modu
# Copyright (c) 2006-2010 Phil Christensen
# http://modu.bubblehouse.org
#
#
# See LICENSE for details
"""
Datatypes for managing stringlike data.
"""
import time, datetime
from zope.interface import implements
from modu.editable import IDatatype, define
from modu.util import form, tags, date
from modu.persist import sql
from modu import persist, assets
DAY = 86400
MONTH = DAY * 31
YEAR = DAY * 365
class CurrentDateField(define.definition):
    """
    Display a checkbox that allows updating a date field with the current date.
    """
    # NOTE(review): unlike DateField below, this class does not declare
    # implements(IDatatype) -- confirm whether that is intentional.

    def get_element(self, req, style, storable):
        """
        @see: L{modu.editable.define.definition.get_element()}
        """
        # Current value of the backing column, formatted for display.
        value = getattr(storable, self.get_column_name(), None)
        if(value):
            output = date.strftime(value, self.get('format_string', '%B %d, %Y at %I:%M%p'))
        else:
            output = ''
        if(style == 'search'):
            # No search support: return an empty form node.
            frm = form.FormNode(self.name)
            return frm
        elif(style == 'listing'):
            frm = form.FormNode(self.name)
            if(self.get('date_in_listing', True)):
                if(output == ''):
                    output = '(none)'
                frm(type='label', value=output)
            else:
                # Only show whether a date is set at all.
                frm(type='checkbox', disabled=True, checked=bool(output))
            return frm
        elif(style == 'detail' and self.get('read_only', False)):
            if(output == ''):
                output = '(none)'
            frm = form.FormNode(self.name)
            frm(type='label', value=output)
            return frm
        # Editable detail view: checkbox plus the current value as a hint.
        checked = False
        if(storable.get_id() == 0 and self.get('default_checked', False)):
            checked = True
        frm = form.FormNode(self.name)(
            type = 'checkbox',
            # this is only True if default_checked is true and it's a new item
            checked = checked,
            suffix = ' ' + tags.small()[output],
        )
        if(bool(output)):
            if(self.get('one_time', True)):
                # A one-time field may not be updated once set.
                frm(attributes=dict(disabled='disabled'))
        else:
            frm(
                text = ' ' + tags.small(_class='minor-help')['check to set current date']
            )
        return frm

    def update_storable(self, req, form, storable):
        # Only touch the column when the checkbox was actually checked.
        if(form[self.name].attr('checked', False)):
            value = datetime.datetime.now()
            save_format = self.get('save_format', 'timestamp')
            if(save_format == 'timestamp'):
                setattr(storable, self.get_column_name(), date.convert_to_timestamp(value))
            else:
                setattr(storable, self.get_column_name(), value)
        return True
class DateField(define.definition):
    """
    Allow editing of date data via a multiple select interface or javascript popup calendar.
    """
    implements(IDatatype)

    def get_element(self, req, style, storable):
        """
        @see: L{modu.editable.define.definition.get_element()}
        """
        value = getattr(storable, self.get_column_name(), None)
        # Numeric timestamps are converted to datetime for display.
        # NOTE: `long` makes this module Python 2 only.
        if(isinstance(value, (int, long, float))):
            value = datetime.datetime.utcfromtimestamp(value)
        if(style == 'search'):
            # Search style renders a from/to date-range pair.
            frm = form.FormNode(self.name)
            frm['from'] = self.get_form_element(req, '_detail', storable)(
                prefix='<div>from date:',
                suffix=tags.br() + '</div>',
            )
            frm['to'] = self.get_form_element(req, '_detail', storable)(
                prefix='<div>to date:',
                suffix='</div>',
            )
            return frm
        elif(style == 'listing' or (style == 'detail' and self.get('read_only', False))):
            if(value):
                output = date.strftime(value, self.get('format_string', '%B %d, %Y at %I:%M%p'))
            else:
                output = ''
            frm = form.FormNode(self.name)
            frm(type='label', value=output)
            return frm
        # Editable detail view: build the date selection widget.
        current_year = datetime.datetime.now().year
        if(value is not None):
            current_year = getattr(value, 'year', current_year)
        # NOTE(review): start_year/end_year/months/days are computed but not
        # referenced below in this view -- confirm whether the form types
        # consume them elsewhere.
        start_year = self.get('start_year', current_year - 2)
        end_year = self.get('end_year', current_year + 5)
        months, days = date.get_date_arrays()
        frm = form.FormNode(self.name)
        frm(type='fieldset', style='brief')
        # The "no value" checkbox toggles the date selects client-side.
        frm['null'](type='checkbox', text="no value", weight=-1, suffix=tags.br(),
                    attributes=dict(onChange='enableDateField(this);'))
        assets.activate_jquery(req)
        req.content.report('header', tags.script(type='text/javascript')["""
            function enableDateField(checkboxField){
                var formItem = $(checkboxField).parent().parent();
                if($(checkboxField).attr('checked')){
                    formItem.children(':enabled').attr('disabled', true);
                }
                else{
                    formItem.children(':disabled').attr('disabled', false);
                }
            }
        """])
        attribs = {}
        if(value is None):
            frm['null'](checked=True)
            #attribs['disabled'] = None
            if(self.get('default_now', False)):
                # Pre-fill with "now" for new records when configured to.
                value = datetime.datetime.now()
                frm['null'](checked=False)
        frm['date'](
            type = self.get('style', 'datetime'),
            value = value,
            attributes = attribs,
            suffix = tags.script(type="text/javascript")["""
                enableDateField($('#form-item-%s input'));
            """ % self.name],
        )
        frm.validate = self.validate
        return frm

    def validate(self, req, frm):
        # Require a submitted date when the field is marked required.
        if(not frm[self.name]['date'].attr('value', '') and self.get('required', False)):
            frm.set_error(self.name, 'You must enter a value for this field.')
            return False
        return True

    def update_storable(self, req, form, storable):
        """
        @see: L{modu.editable.define.definition.update_storable()}
        """
        save_format = self.get('save_format', 'timestamp')
        if(self.get('read_only')):
            # Read-only fields may still be auto-populated on creation.
            if(self.get('default_now', False) and not storable.get_id()):
                if(save_format == 'timestamp'):
                    setattr(storable, self.get_column_name(), int(time.time()))
                else:
                    setattr(storable, self.get_column_name(), datetime.datetime.now())
            return True
        data = form[self.name]['date']
        if(data.attr('null', 0)):
            # "no value" was checked: clear the column.
            setattr(storable, self.get_column_name(), None)
            return True
        date_data = req.data[form.name][self.name].get('date', None)
        # if it's not a dict, it must be None, or broken
        if(isinstance(date_data, dict)):
            value = date.get_dateselect_value(date_data, self.get('style', 'datetime'))
        else:
            value = None
        if(save_format == 'timestamp'):
            setattr(storable, self.get_column_name(), date.convert_to_timestamp(value))
        else:
            setattr(storable, self.get_column_name(), value)
        return True

    def get_search_value(self, value, req, frm):
        # Build a SQL condition matching the submitted from/to range.
        form_data = frm[self.name]
        to_value = 0
        from_value = 0
        if not(value['to'].get('null')):
            start_year = form_data['to']['date'].start_year
            end_year = form_data['to']['date'].end_year
            date_data = value['to'].get('date', None)
            if(date_data):
                to_value = date.get_dateselect_value(date_data, self.get('style', 'datetime'), start_year, end_year)
                to_value = time.mktime(to_value.timetuple())
        if not(value['from'].get('null')):
            start_year = form_data['from']['date'].start_year
            end_year = form_data['from']['date'].end_year
            date_data = value['from'].get('date', None)
            if(date_data):
                from_value = date.get_dateselect_value(date_data, self.get('style', 'datetime'), start_year, end_year)
                from_value = time.mktime(from_value.timetuple())
        if(to_value and from_value):
            # datetime columns need converting before numeric comparison.
            if(self.get('save_format', 'timestamp') == 'datetime'):
                return sql.RAW('UNIX_TIMESTAMP(%%s) BETWEEN %s AND %s' % (from_value, to_value))
            else:
                return sql.RAW('%%s BETWEEN %s AND %s' % (from_value, to_value))
        elif(to_value):
            return sql.LT(to_value)
        elif(from_value):
            return sql.GT(from_value)
        else:
            return None
| |
"""
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
    # Test gram schmidt orthonormalization
    rng = np.random.RandomState(0)
    # Build a random orthogonal basis from an SVD.
    W, _, _ = np.linalg.svd(rng.randn(10, 10))

    # Decorrelating against the full basis annihilates the vector.
    w = rng.randn(10)
    _gs_decorrelation(w, W, 10)
    assert_less((w ** 2).sum(), 1.e-10)

    # Against the first five rows only, the projection onto those rows
    # must vanish while the rest of the vector survives.
    w = rng.randn(10)
    u = _gs_decorrelation(w, W, 5)
    tmp = np.dot(u, W.T)
    assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
    # Test the FastICA algorithm on very simple data.
    rng = np.random.RandomState(0)
    # scipy.stats uses the global RNG:
    np.random.seed(0)
    n_samples = 1000
    # Generate two sources: a square wave and a heavy-tailed t sample.
    s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
    s2 = stats.t.rvs(1, size=n_samples)
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s
    # Mixing angle
    phi = 0.6
    mixing = np.array([[np.cos(phi), np.sin(phi)],
                       [np.sin(phi), -np.cos(phi)]])
    m = np.dot(mixing, s)
    if add_noise:
        m += 0.1 * rng.randn(2, 1000)
    center_and_norm(m)

    # function as fun arg
    def g_test(x):
        return x ** 3, (3 * x ** 2).mean(axis=-1)

    algos = ['parallel', 'deflation']
    nls = ['logcosh', 'exp', 'cube', g_test]
    whitening = [True, False]
    for algo, nl, whiten in itertools.product(algos, nls, whitening):
        if whiten:
            k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
            # An arbitrary callable such as np.tanh must be rejected.
            assert_raises(ValueError, fastica, m.T, fun=np.tanh,
                          algorithm=algo)
        else:
            # Whiten manually via PCA when fastica itself does not.
            X = PCA(n_components=2, whiten=True).fit_transform(m.T)
            k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
            assert_raises(ValueError, fastica, X, fun=np.tanh,
                          algorithm=algo)
        s_ = s_.T
        # Check that the mixing model described in the docstring holds:
        if whiten:
            assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
        center_and_norm(s_)
        s1_, s2_ = s_
        # Check to see if the sources have been estimated
        # in the wrong order
        if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
            s2_, s1_ = s_
        # Resolve the sign indeterminacy before comparing to the truth.
        s1_ *= np.sign(np.dot(s1_, s1))
        s2_ *= np.sign(np.dot(s2_, s2))
        # Check that we have estimated the original sources
        if not add_noise:
            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
        else:
            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)

    # Test FastICA class (nl/algo keep the last values of the loop above).
    _, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
    ica = FastICA(fun=nl, algorithm=algo, random_state=0)
    sources = ica.fit_transform(m.T)
    assert_equal(ica.components_.shape, (2, 2))
    assert_equal(sources.shape, (1000, 2))
    assert_array_almost_equal(sources_fun, sources)
    assert_array_almost_equal(sources, ica.transform(m.T))
    assert_equal(ica.mixing_.shape, (2, 2))
    # Invalid fun arguments must raise.
    for fn in [np.tanh, "exp(-.5(x^2))"]:
        ica = FastICA(fun=fn, algorithm=algo, random_state=0)
        assert_raises(ValueError, ica.fit, m.T)
    assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
    # Regression test for issue #697: fitting with whiten=False while
    # requesting n_components should warn and still expose mixing_.
    data = [[0, 1], [1, 0]]
    ica = FastICA(n_components=1, whiten=False, random_state=0)
    assert_warns(UserWarning, ica.fit, data)
    assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
    # Test the FastICA algorithm on very simple data.
    rng = np.random.RandomState(0)
    n_samples = 1000
    # Generate two sources:
    t = np.linspace(0, 100, n_samples)
    s1 = np.sin(t)
    s2 = np.ceil(np.sin(np.pi * t))
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s
    # Mixing matrix: six observed channels mixed from two sources.
    mixing = rng.randn(6, 2)
    m = np.dot(mixing, s)
    if add_noise:
        m += 0.1 * rng.randn(6, n_samples)
    center_and_norm(m)
    # Recover two components from the six-channel mixture.
    k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
    s_ = s_.T
    # Check that the mixing model described in the docstring holds:
    assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
    center_and_norm(s_)
    s1_, s2_ = s_
    # Check to see if the sources have been estimated
    # in the wrong order
    if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
        s2_, s1_ = s_
    # Resolve the sign indeterminacy before comparing to the truth.
    s1_ *= np.sign(np.dot(s1_, s1))
    s2_ *= np.sign(np.dot(s2_, s2))
    # Check that we have estimated the original sources
    if not add_noise:
        assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
        assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
    # Check FastICA.fit_transform against fit followed by transform.
    rng = np.random.RandomState(0)
    X = rng.random_sample((100, 10))
    for whiten, n_components in [[True, 5], [False, None]]:
        expected = X.shape[1] if n_components is None else n_components

        ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
        Xt = ica.fit_transform(X)
        assert_equal(ica.components_.shape, (expected, 10))
        assert_equal(Xt.shape, (100, expected))

        # A fresh estimator fitted the same way must transform identically.
        ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
        ica.fit(X)
        assert_equal(ica.components_.shape, (expected, 10))
        Xt2 = ica.transform(X)
        assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
    # Test FastICA.inverse_transform
    n_features = 10
    n_samples = 100
    n1, n2 = 5, 10
    rng = np.random.RandomState(0)
    X = rng.random_sample((n_samples, n_features))
    # Expected mixing_ shape per (whiten, n_components) combination.
    # NOTE(review): with whiten=False n_components appears to be ignored,
    # hence (False, n1) also maps to (n_features, n2) -- confirm.
    expected = {(True, n1): (n_features, n1),
                (True, n2): (n_features, n2),
                (False, n1): (n_features, n2),
                (False, n2): (n_features, n2)}
    for whiten in [True, False]:
        for n_components in [n1, n2]:
            n_components_ = (n_components if n_components is not None else
                             X.shape[1])
            ica = FastICA(n_components=n_components, random_state=rng,
                          whiten=whiten)
            with warnings.catch_warnings(record=True):
                # catch "n_components ignored" warning
                Xt = ica.fit_transform(X)
            expected_shape = expected[(whiten, n_components_)]
            assert_equal(ica.mixing_.shape, expected_shape)
            X2 = ica.inverse_transform(Xt)
            assert_equal(X.shape, X2.shape)
            # reversibility test in non-reduction case
            if n_components == X.shape[1]:
                assert_array_almost_equal(X, X2)
if __name__ == '__main__':
    # Allow running this test module directly via nose.
    import nose
    nose.run(argv=['', __file__])
| |
# This file is part of Viper - https://github.com/botherder/viper
# See the file 'LICENSE' for copying permission.
import os
import re
import datetime
import tempfile
import time
try:
import pefile
import peutils
HAVE_PEFILE = True
except ImportError:
HAVE_PEFILE = False
try:
from modules.pehash.pehasher import calculate_pehash
HAVE_PEHASH = True
except ImportError:
HAVE_PEHASH = False
try:
from modules.verifysigs.verifysigs import get_auth_data
from verifysigs.asn1 import dn
HAVE_VERIFYSIGS= True
except ImportError:
HAVE_VERIFYSIGS = False
from viper.common.out import bold, table
from viper.common.abstracts import Module
from viper.common.utils import get_type, get_md5
from viper.core.database import Database
from viper.core.storage import get_sample_path
from viper.core.session import __sessions__
class PE(Module):
    """Viper module exposing the `pe` command, which extracts
    information from PE32 headers."""
    # Command name, help text and authors consumed by the Module framework.
    cmd = 'pe'
    description = 'Extract information from PE32 headers'
    authors = ['nex', 'Statixs']
    def __init__(self):
        """Declare one argparse sub-command per PE analysis feature."""
        super(PE, self).__init__()
        subparsers = self.parser.add_subparsers(dest='subname')
        # Simple listings.
        subparsers.add_parser('imports', help='List PE imports')
        subparsers.add_parser('exports', help='List PE exports')
        # Resource extraction / scanning.
        parser_res = subparsers.add_parser('resources', help='List PE resources')
        parser_res.add_argument('-d', '--dump', metavar='folder', help='Destination directory to store resource files in')
        parser_res.add_argument('-o', '--open', metavar='resource number', type=int, help='Open a session on the specified resource')
        parser_res.add_argument('-s', '--scan', action='store_true', help='Scan the repository for common resources')
        # Import-hash based clustering.
        parser_imp = subparsers.add_parser('imphash', help='Get and scan for imphash')
        parser_imp.add_argument('-s', '--scan', action='store_true', help='Scan for all samples with same imphash')
        parser_imp.add_argument('-c', '--cluster', action='store_true', help='Cluster repository by imphash (careful, could be massive)')
        # Compile-time comparison, with an optional fuzziness window.
        parser_comp = subparsers.add_parser('compiletime', help='Show the compiletime')
        parser_comp.add_argument('-s', '--scan', action='store_true', help='Scan the repository for common compile time')
        parser_comp.add_argument('-w', '--window', type=int, help='Specify an optional time window in minutes')
        # PEiD packer signatures.
        parser_peid = subparsers.add_parser('peid', help='Show the PEiD signatures')
        parser_peid.add_argument('-s', '--scan', action='store_true', help='Scan the repository for PEiD signatures')
        # Digital signature inspection.
        parser_sec = subparsers.add_parser('security', help='Show digital signature')
        parser_sec.add_argument('-d', '--dump', metavar='folder', help='Destination directory to store digital signature in')
        parser_sec.add_argument('-a', '--all', action='store_true', help='Find all samples with a digital signature')
        parser_sec.add_argument('-s', '--scan', action='store_true', help='Scan the repository for common certificates')
        parser_sec.add_argument('-c', '--check', action='store_true', help='Check authenticode information')
        # Language guessing, sections listing and PEhash clustering.
        parser_lang = subparsers.add_parser('language', help='Guess PE language')
        parser_lang.add_argument('-s', '--scan', action='store_true', help='Scan the repository')
        subparsers.add_parser('sections', help='List PE Sections')
        parser_peh = subparsers.add_parser('pehash', help='Calculate the PEhash and compare them')
        parser_peh.add_argument('-a', '--all', action='store_true', help='Prints the PEhash of all files in the project')
        parser_peh.add_argument('-c', '--cluster', action='store_true', help='Calculate and cluster all files in the project')
        parser_peh.add_argument('-s', '--scan', action='store_true', help='Scan repository for matching samples')
        # Lazily populated by __check_session().
        self.pe = None
def __check_session(self):
if not __sessions__.is_set():
self.log('error', "No session opened")
return False
if not self.pe:
try:
self.pe = pefile.PE(__sessions__.current.file.path)
except pefile.PEFormatError as e:
self.log('error', "Unable to parse PE file: {0}".format(e))
return False
return True
def imports(self):
if not self.__check_session():
return
if hasattr(self.pe, 'DIRECTORY_ENTRY_IMPORT'):
for entry in self.pe.DIRECTORY_ENTRY_IMPORT:
try:
self.log('info', "DLL: {0}".format(entry.dll))
for symbol in entry.imports:
self.log('item', "{0}: {1}".format(hex(symbol.address), symbol.name))
except:
continue
def exports(self):
if not self.__check_session():
return
self.log('info', "Exports:")
if hasattr(self.pe, 'DIRECTORY_ENTRY_EXPORT'):
for symbol in self.pe.DIRECTORY_ENTRY_EXPORT.symbols:
self.log('item', "{0}: {1} ({2})".format(hex(self.pe.OPTIONAL_HEADER.ImageBase + symbol.address), symbol.name, symbol.ordinal))
    def compiletime(self):
        """Show the PE compile time and optionally scan the repository for
        samples compiled at the same time (or within --window minutes)."""
        def get_compiletime(pe):
            # TimeDateStamp holds seconds since epoch in the COFF header.
            return datetime.datetime.fromtimestamp(pe.FILE_HEADER.TimeDateStamp)

        if not self.__check_session():
            return

        compile_time = get_compiletime(self.pe)
        self.log('info', "Compile Time: {0}".format(bold(compile_time)))

        if self.args.scan:
            self.log('info', "Scanning the repository for matching samples...")
            db = Database()
            samples = db.find(key='all')
            matches = []
            for sample in samples:
                # Skip the currently opened sample itself.
                if sample.sha256 == __sessions__.current.file.sha256:
                    continue
                sample_path = get_sample_path(sample.sha256)
                if not os.path.exists(sample_path):
                    continue
                try:
                    cur_pe = pefile.PE(sample_path)
                    cur_compile_time = get_compiletime(cur_pe)
                except:
                    # Unparseable samples are skipped (best effort).
                    continue
                if compile_time == cur_compile_time:
                    matches.append([sample.name, sample.md5, cur_compile_time])
                else:
                    if self.args.window:
                        # Absolute difference between both compile times.
                        if cur_compile_time > compile_time:
                            delta = (cur_compile_time - compile_time)
                        elif cur_compile_time < compile_time:
                            delta = (compile_time - cur_compile_time)
                        # NOTE(review): this is integer division on Python 2
                        # and float division on Python 3 -- confirm intended
                        # rounding of partial minutes.
                        delta_minutes = int(delta.total_seconds()) / 60
                        if delta_minutes <= self.args.window:
                            matches.append([sample.name, sample.md5, cur_compile_time])
            self.log('info', "{0} relevant matches found".format(bold(len(matches))))
            if len(matches) > 0:
                self.log('table', dict(header=['Name', 'MD5', 'Compile Time'], rows=matches))
def peid(self):
def get_signatures():
with file('data/peid/UserDB.TXT', 'rt') as f:
sig_data = f.read()
signatures = peutils.SignatureDatabase(data=sig_data)
return signatures
def get_matches(pe, signatures):
matches = signatures.match_all(pe, ep_only=True)
return matches
if not self.__check_session():
return
signatures = get_signatures()
peid_matches = get_matches(self.pe, signatures)
if peid_matches:
self.log('info', "PEiD Signatures:")
for sig in peid_matches:
if type(sig) is list:
self.log('item', sig[0])
else:
self.log('item', sig)
else:
self.log('info', "No PEiD signatures matched.")
if self.args.scan and peid_matches:
self.log('info', "Scanning the repository for matching samples...")
db = Database()
samples = db.find(key='all')
matches = []
for sample in samples:
if sample.sha256 == __sessions__.current.file.sha256:
continue
sample_path = get_sample_path(sample.sha256)
if not os.path.exists(sample_path):
continue
try:
cur_pe = pefile.PE(sample_path)
cur_peid_matches = get_matches(cur_pe, signatures)
except:
continue
if peid_matches == cur_peid_matches:
matches.append([sample.name, sample.sha256])
self.log('info', "{0} relevant matches found".format(bold(len(matches))))
if len(matches) > 0:
self.log('table', dict(header=['Name', 'SHA256'], rows=matches))
def resources(self):
    """List the resources of the opened PE file; optionally dump them,
    open a session on one of them, or scan the repository for samples
    sharing a resource (matched by MD5)."""
    # Use this function to retrieve resources for the given PE instance.
    # Returns all the identified resources with indicators and attributes.
    def get_resources(pe):
        resources = []
        if hasattr(pe, 'DIRECTORY_ENTRY_RESOURCE'):
            count = 1
            for resource_type in pe.DIRECTORY_ENTRY_RESOURCE.entries:
                try:
                    if resource_type.name is not None:
                        name = str(resource_type.name)
                    else:
                        name = str(pefile.RESOURCE_TYPE.get(resource_type.struct.Id))

                    if name is None:
                        name = str(resource_type.struct.Id)

                    if hasattr(resource_type, 'directory'):
                        for resource_id in resource_type.directory.entries:
                            if hasattr(resource_id, 'directory'):
                                for resource_lang in resource_id.directory.entries:
                                    data = pe.get_data(resource_lang.data.struct.OffsetToData, resource_lang.data.struct.Size)
                                    filetype = get_type(data)
                                    md5 = get_md5(data)
                                    language = pefile.LANG.get(resource_lang.data.lang, None)
                                    sublanguage = pefile.get_sublang_name_for_lang(resource_lang.data.lang, resource_lang.data.sublang)
                                    offset = ('%-8s' % hex(resource_lang.data.struct.OffsetToData)).strip()
                                    size = ('%-8s' % hex(resource_lang.data.struct.Size)).strip()

                                    resource = [count, name, offset, md5, size, filetype, language, sublanguage]

                                    # Dump resources if requested to and if the file currently being
                                    # processed is the opened session file.
                                    # This is to avoid that during a --scan all the resources being
                                    # scanned are dumped as well.
                                    if (self.args.open or self.args.dump) and pe == self.pe:
                                        if self.args.dump:
                                            folder = self.args.dump
                                        else:
                                            folder = tempfile.mkdtemp()

                                        resource_path = os.path.join(folder, '{0}_{1}_{2}'.format(__sessions__.current.file.md5, offset, name))
                                        resource.append(resource_path)

                                        with open(resource_path, 'wb') as resource_handle:
                                            resource_handle.write(data)

                                    resources.append(resource)

                                    count += 1
                except Exception as e:
                    self.log('error', e)
                    continue

        return resources

    if not self.__check_session():
        return

    # Obtain resources for the currently opened file.
    resources = get_resources(self.pe)

    if not resources:
        self.log('warning', "No resources found")
        return

    headers = ['#', 'Name', 'Offset', 'MD5', 'Size', 'File Type', 'Language', 'Sublanguage']
    if self.args.dump or self.args.open:
        headers.append('Dumped To')

    # BUGFIX: was "print table(headers, resources)" -- a Python 2 print
    # statement (syntax error on py3) and inconsistent with the module's
    # logging convention used by every other subcommand.
    self.log('table', dict(header=headers, rows=resources))

    # If instructed, open a session on the given resource.
    if self.args.open:
        for resource in resources:
            if resource[0] == self.args.open:
                # Index 8 is the dump path appended when --open/--dump is set.
                __sessions__.new(resource[8])
                return
    # If instructed to perform a scan across the repository, start looping
    # through all available files.
    elif self.args.scan:
        self.log('info', "Scanning the repository for matching samples...")

        # Retrieve list of samples stored locally and available in the
        # database.
        db = Database()
        samples = db.find(key='all')

        matches = []
        for sample in samples:
            # Skip if it's the same file.
            if sample.sha256 == __sessions__.current.file.sha256:
                continue

            # Obtain path to the binary.
            sample_path = get_sample_path(sample.sha256)
            if not os.path.exists(sample_path):
                continue

            # Open PE instance.
            try:
                cur_pe = pefile.PE(sample_path)
            except Exception:
                # Not a valid PE: skip it (was a bare except).
                continue

            # Obtain the list of resources for the current iteration.
            cur_resources = get_resources(cur_pe)
            matched_resources = []
            # Loop through entry's resources.
            for cur_resource in cur_resources:
                # Loop through opened file's resources.
                for resource in resources:
                    # If there is a common resource, add it to the list.
                    if cur_resource[3] == resource[3]:
                        matched_resources.append(resource[3])

            # If there are any common resources, add the entry to the list
            # of matched samples.
            if len(matched_resources) > 0:
                matches.append([sample.name, sample.md5, '\n'.join(r for r in matched_resources)])

        self.log('info', "{0} relevant matches found".format(bold(len(matches))))

        if len(matches) > 0:
            self.log('table', dict(header=['Name', 'MD5', 'Resource MD5'], rows=matches))
def imphash(self):
    """Compute the imphash of the opened PE file, or cluster/scan the
    whole repository by imphash."""
    if self.args.scan and self.args.cluster:
        self.log('error', "You selected two exclusive options, pick one")
        return

    if self.args.cluster:
        self.log('info', "Clustering all samples by imphash...")

        db = Database()
        samples = db.find(key='all')

        cluster = {}
        for sample in samples:
            sample_path = get_sample_path(sample.sha256)
            if not os.path.exists(sample_path):
                continue

            try:
                cur_imphash = pefile.PE(sample_path).get_imphash()
            except Exception:
                # Not a valid PE or no imphash support: skip (was bare except).
                continue

            if cur_imphash not in cluster:
                cluster[cur_imphash] = []

            cluster[cur_imphash].append([sample.sha256, sample.name])

        for key, value in cluster.items():
            # Skipping clusters with only one entry.
            if len(value) == 1:
                continue

            self.log('info', "Imphash cluster {0}".format(bold(key)))

            for entry in value:
                self.log('item', "{0} [{1}]".format(entry[0], entry[1]))

            self.log('', "")

        return

    if self.__check_session():
        try:
            imphash = self.pe.get_imphash()
        except AttributeError:
            self.log('error', "No imphash support, upgrade pefile to a version >= 1.2.10-139 (`pip install --upgrade pefile`)")
            return

        self.log('info', "Imphash: {0}".format(bold(imphash)))

        if self.args.scan:
            self.log('info', "Scanning the repository for matching samples...")

            db = Database()
            samples = db.find(key='all')

            matches = []
            for sample in samples:
                # Skip the currently opened file itself.
                if sample.sha256 == __sessions__.current.file.sha256:
                    continue

                sample_path = get_sample_path(sample.sha256)
                if not os.path.exists(sample_path):
                    continue

                try:
                    cur_imphash = pefile.PE(sample_path).get_imphash()
                except Exception:
                    # Skip unparsable samples (was a bare except).
                    continue

                if imphash == cur_imphash:
                    matches.append([sample.name, sample.sha256])

            self.log('info', "{0} relevant matches found".format(bold(len(matches))))

            if len(matches) > 0:
                self.log('table', dict(header=['Name', 'SHA256'], rows=matches))
def security(self):
    """Extract the Authenticode certificate blob from the opened PE file
    and optionally dump it, list all signed samples, find samples signed
    with the same certificate, or validate the signature chain."""
    def get_certificate(pe):
        # TODO: this only extract the raw list of certificate data.
        # I need to parse them, extract single certificates and perhaps return
        # the PEM data of the first certificate only.
        pe_security_dir = pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']
        address = pe.OPTIONAL_HEADER.DATA_DIRECTORY[pe_security_dir].VirtualAddress
        # size = pe.OPTIONAL_HEADER.DATA_DIRECTORY[pe_security_dir].Size
        if address:
            # Skip the 8-byte WIN_CERTIFICATE header preceding the blob.
            return pe.write()[address + 8:]
        else:
            return None

    def get_signed_samples(current=None, cert_filter=None):
        """Return rows for signed samples in the repository.

        With cert_filter set, only samples whose certificate MD5 matches
        are returned as [name, md5]; otherwise every signed sample is
        returned as [name, md5, cert_md5].
        """
        db = Database()
        samples = db.find(key='all')

        results = []
        for sample in samples:
            # Skip if it's the same file.
            if current:
                if sample.sha256 == current:
                    continue

            # Obtain path to the binary.
            sample_path = get_sample_path(sample.sha256)
            if not os.path.exists(sample_path):
                continue

            # Open PE instance.
            try:
                cur_pe = pefile.PE(sample_path)
            except Exception:
                # Not a valid PE: skip it (was a bare except).
                continue

            cur_cert_data = get_certificate(cur_pe)

            if not cur_cert_data:
                continue

            cur_cert_md5 = get_md5(cur_cert_data)

            if cert_filter:
                if cur_cert_md5 == cert_filter:
                    results.append([sample.name, sample.md5])
            else:
                results.append([sample.name, sample.md5, cur_cert_md5])

        return results

    if self.args.all:
        self.log('info', "Scanning the repository for all signed samples...")

        all_of_them = get_signed_samples()

        self.log('info', "{0} signed samples found".format(bold(len(all_of_them))))

        if len(all_of_them) > 0:
            self.log('table', dict(header=['Name', 'MD5', 'Cert MD5'], rows=all_of_them))

        return

    if not self.__check_session():
        return

    cert_data = get_certificate(self.pe)

    if not cert_data:
        self.log('warning', "No certificate found")
        return

    cert_md5 = get_md5(cert_data)
    self.log('info', "Found certificate with MD5 {0}".format(bold(cert_md5)))

    if self.args.dump:
        cert_path = os.path.join(self.args.dump, '{0}.crt'.format(__sessions__.current.file.sha256))
        with open(cert_path, 'wb+') as cert_handle:
            cert_handle.write(cert_data)

        self.log('info', "Dumped certificate to {0}".format(cert_path))
        self.log('info', "You can parse it using the following command:\n\t" +
                 bold("openssl pkcs7 -inform DER -print_certs -text -in {0}".format(cert_path)))

    # TODO: do scan for certificate's serial number.
    if self.args.scan:
        self.log('info', "Scanning the repository for matching signed samples...")

        matches = get_signed_samples(current=__sessions__.current.file.sha256, cert_filter=cert_md5)

        self.log('info', "{0} relevant matches found".format(bold(len(matches))))

        if len(matches) > 0:
            # BUGFIX: the rows carry MD5 values (see get_signed_samples),
            # so label the column MD5 instead of SHA256.
            self.log('table', dict(header=['Name', 'MD5'], rows=matches))

    # TODO: this function needs to be better integrated with the rest of the command.
    # TODO: need to add more error handling and figure out why so many samples are failing.
    if self.args.check:
        if not HAVE_VERIFYSIGS:
            self.log('error', "Dependencies missing for authenticode validation. Please install M2Crypto and pyasn1 (`pip install pyasn1 M2Crypto`)")
            return

        try:
            auth, computed_content_hash = get_auth_data(__sessions__.current.file.path)
        except Exception as e:
            self.log('error', "Unable to parse PE certificate: {0}".format(str(e)))
            return

        try:
            auth.ValidateAsn1()
            auth.ValidateHashes(computed_content_hash)
            auth.ValidateSignatures()
            auth.ValidateCertChains(time.gmtime())
        except Exception as e:
            # BUGFIX: was "except Exception, e" -- Python 2-only syntax,
            # inconsistent with the "as" form used elsewhere in this module.
            self.log('error', "Unable to validate PE certificate: {0}".format(str(e)))
            return

        self.log('info', bold('Signature metadata:'))
        self.log('info', 'Program name: {0}'.format(auth.program_name))
        self.log('info', 'URL: {0}'.format(auth.program_url))

        if auth.has_countersignature:
            self.log('info', bold('Countersignature is present. Timestamp: {0} UTC'.format(
                time.asctime(time.gmtime(auth.counter_timestamp)))))
        else:
            self.log('info', bold('Countersignature is not present.'))

        self.log('info', bold('Binary is signed with cert issued by:'))
        self.log('info', '{0}'.format(auth.signing_cert_id[0]))
        self.log('info', '{0}'.format(auth.cert_chain_head[2][0]))
        self.log('info', 'Chain not before: {0} UTC'.format(
            time.asctime(time.gmtime(auth.cert_chain_head[0]))))
        self.log('info', 'Chain not after: {0} UTC'.format(
            time.asctime(time.gmtime(auth.cert_chain_head[1]))))

        if auth.has_countersignature:
            self.log('info', bold('Countersig chain head issued by:'))
            self.log('info', '{0}'.format(auth.counter_chain_head[2]))
            self.log('info', 'Countersig not before: {0} UTC'.format(
                time.asctime(time.gmtime(auth.counter_chain_head[0]))))
            self.log('info', 'Countersig not after: {0} UTC'.format(
                time.asctime(time.gmtime(auth.counter_chain_head[1]))))

        self.log('info', bold('Certificates:'))
        for (issuer, serial), cert in auth.certificates.items():
            self.log('info', 'Issuer: {0}'.format(issuer))
            self.log('info', 'Serial: {0}'.format(serial))
            subject = cert[0][0]['subject']
            subject_dn = str(dn.DistinguishedName.TraverseRdn(subject[0]))
            self.log('info', 'Subject: {0}'.format(subject_dn))

            not_before = cert[0][0]['validity']['notBefore']
            not_after = cert[0][0]['validity']['notAfter']
            not_before_time = not_before.ToPythonEpochTime()
            not_after_time = not_after.ToPythonEpochTime()
            self.log('info', 'Not Before: {0} UTC ({1})'.format(
                time.asctime(time.gmtime(not_before_time)), not_before[0]))
            self.log('info', 'Not After: {0} UTC ({1})'.format(
                time.asctime(time.gmtime(not_after_time)), not_after[0]))

        if auth.trailing_data:
            # NOTE(review): .encode('hex') only works on Python 2 str --
            # would need binascii.hexlify/.hex() under Python 3; confirm
            # the module's target interpreter before changing.
            self.log('info', 'Signature Blob had trailing (unvalidated) data ({0} bytes): {1}'.format(
                len(auth.trailing_data), auth.trailing_data.encode('hex')))
def language(self):
    """Heuristically guess the programming language the opened PE file
    was written in, optionally scanning the repository for samples that
    match the same language."""
    def get_iat(pe):
        """Return the list of imported DLL names."""
        iat = []
        if hasattr(pe, 'DIRECTORY_ENTRY_IMPORT'):
            for peimport in pe.DIRECTORY_ENTRY_IMPORT:
                iat.append(peimport.dll)

        return iat

    def check_module(iat, match):
        """Return True if any imported DLL name contains *match*."""
        for imp in iat:
            if imp.find(match) != -1:
                return True

        return False

    def is_cpp(data, cpp_count):
        # A C++ guess needs both the C runtime import (counted by the
        # caller) and RTTI artifacts among the strings.
        for line in data:
            if 'type_info' in line or 'RTTI' in line:
                cpp_count += 1
                break

        if cpp_count == 2:
            return True

        return False

    def is_delphi(data):
        for line in data:
            if 'Borland' in line:
                path = line.split('\\')
                for p in path:
                    if 'Delphi' in p:
                        return True
        return False

    def is_vbdotnet(data):
        for line in data:
            if 'Compiler' in line:
                stuff = line.split('.')
                if 'VisualBasic' in stuff:
                    return True
        return False

    def is_autoit(data):
        for line in data:
            if 'AU3!' in line:
                return True
        return False

    def is_packed(pe):
        # High section entropy is a rough packing heuristic.
        for section in pe.sections:
            if section.get_entropy() > 7:
                return True
        return False

    def get_strings(content):
        # Raw string: '\-' and '\.' are invalid escapes in a normal
        # string literal (DeprecationWarning on modern Pythons).
        regexp = r'[\x30-\x39\x41-\x5f\x61-\x7a\-\.:]{4,}'
        return re.findall(regexp, content)

    def find_language(iat, sample, content):
        # NOTE(review): *sample* is kept for interface stability but is no
        # longer read after the VB bugfix below.
        dotnet = False
        cpp_count = 0
        found = None

        # VB check
        if check_module(iat, 'VB'):
            # BUGFIX: previously returned True (not a language name), so the
            # caller logged "Probable language: True" and --scan compared
            # languages against a boolean. Return the name like every other
            # branch.
            return 'Visual Basic'

        # .NET check
        if check_module(iat, 'mscoree.dll') and not found:
            dotnet = True
            found = '.NET'

        # C DLL check
        if not found and (check_module(iat, 'msvcr') or check_module(iat, 'MSVCR') or check_module(iat, 'c++')):
            cpp_count += 1

        if not found:
            data = get_strings(content)

            if is_cpp(data, cpp_count) and not found:
                found = 'CPP'
            if not found and cpp_count == 1:
                found = 'C'
            if not dotnet and is_delphi(data) and not found:
                found = 'Delphi'
            if dotnet and is_vbdotnet(data):
                found = 'Visual Basic .NET'
            if is_autoit(data) and not found:
                found = 'AutoIt'

        return found

    if not self.__check_session():
        return

    if is_packed(self.pe):
        self.log('warning', "Probably packed, the language guess might be unreliable")

    language = find_language(
        get_iat(self.pe),
        __sessions__.current.file,
        __sessions__.current.file.data
    )

    if language:
        self.log('info', "Probable language: {0}".format(bold(language)))
    else:
        self.log('error', "Programming language not identified")
        return

    if self.args.scan:
        self.log('info', "Scanning the repository for matching samples...")

        db = Database()
        samples = db.find(key='all')

        matches = []
        for sample in samples:
            # Skip the currently opened file itself.
            if sample.sha256 == __sessions__.current.file.sha256:
                continue

            sample_path = get_sample_path(sample.sha256)
            if not os.path.exists(sample_path):
                continue

            try:
                cur_pe = pefile.PE(sample_path)
            except pefile.PEFormatError:
                continue

            cur_packed = ''
            if is_packed(cur_pe):
                cur_packed = 'Yes'

            cur_language = find_language(
                get_iat(cur_pe),
                sample,
                open(sample_path, 'rb').read()
            )

            if not cur_language:
                continue

            if cur_language == language:
                matches.append([sample.name, sample.md5, cur_packed])

        if matches:
            self.log('table', dict(header=['Name', 'MD5', 'Is Packed'], rows=matches))
        else:
            self.log('info', "No matches found")
def sections(self):
    """Log a table describing every section of the opened PE file."""
    if not self.__check_session():
        return

    # One row per section: name, RVA, virtual size, raw size, entropy.
    rows = [[section.Name,
             hex(section.VirtualAddress),
             hex(section.Misc_VirtualSize),
             section.SizeOfRawData,
             section.get_entropy()]
            for section in self.pe.sections]

    self.log('info', "PE Sections:")
    self.log('table', dict(header=['Name', 'RVA', 'VirtualSize', 'RawDataSize', 'Entropy'], rows=rows))
def pehash(self):
    """Compute the PEhash of the opened file and optionally list, cluster
    or match PEhash values across the whole repository."""
    if not HAVE_PEHASH:
        self.log('error', "PEhash is missing. Please copy PEhash to the modules directory of Viper")
        return

    current_pehash = None
    if __sessions__.is_set():
        current_pehash = calculate_pehash(__sessions__.current.file.path)
        self.log('info', "PEhash: {0}".format(bold(current_pehash)))

    # Repository-wide modes all need the full (name, md5, pehash) listing.
    if not (self.args.all or self.args.cluster or self.args.scan):
        return

    records = []
    for entry in Database().find(key='all'):
        entry_hash = calculate_pehash(get_sample_path(entry.sha256))
        if entry_hash:
            records.append((entry.name, entry.md5, entry_hash))

    if self.args.all:
        self.log('info', "PEhash for all files:")
        self.log('table', dict(header=['Name', 'MD5', 'PEhash'], rows=records))
    elif self.args.cluster:
        self.log('info', "Clustering files by PEhash...")

        buckets = {}
        for entry_name, entry_md5, entry_hash in records:
            buckets.setdefault(entry_hash, []).append([entry_name, entry_md5])

        # Only report hashes shared by more than one file.
        for entry_hash, members in buckets.items():
            if len(members) > 1:
                self.log('info', "PEhash {0} was calculated on files:".format(bold(entry_hash)))
                self.log('table', dict(header=['Name', 'MD5'], rows=members))
    elif self.args.scan:
        if __sessions__.is_set() and current_pehash:
            self.log('info', "Finding matching samples...")

            hits = [[entry_name, entry_md5]
                    for entry_name, entry_md5, entry_hash in records
                    if entry_md5 != __sessions__.current.file.md5
                    and entry_hash == current_pehash]

            if hits:
                self.log('table', dict(header=['Name', 'MD5'], rows=hits))
            else:
                self.log('info', "No matches found")
def run(self):
    """Dispatch to the PE sub-command selected on the command line."""
    super(PE, self).run()
    if self.args is None:
        return

    if not HAVE_PEFILE:
        self.log('error', "Missing dependency, install pefile (`pip install pefile`)")
        return

    # Map each sub-command name to its handler; unknown names are ignored,
    # matching the original if/elif chain.
    dispatch = {
        'imports': self.imports,
        'exports': self.exports,
        'resources': self.resources,
        'imphash': self.imphash,
        'compiletime': self.compiletime,
        'peid': self.peid,
        'security': self.security,
        'sections': self.sections,
        'language': self.language,
        'pehash': self.pehash,
    }

    handler = dispatch.get(self.args.subname)
    if handler is not None:
        handler()
| |
"""
* Copyright 2008 Fred Sauer
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
"""
from pyjamas.ui.AbsolutePanel import AbsolutePanel
from pyjamas.ui.Widget import Widget
from pyjamas.dnd.drop import BoundaryDropController
from pyjamas.dnd.drop import DropController
"""*
* {@link DragController} which performs the bare essentials such as
* adding/removing styles, maintaining collections, adding mouse listeners, etc.
*
* <p>
* Extend this class to implement specialized drag capabilities such as table
* column or panel resizing. For classic drag-and-drop functionality, i.e. the
* ability to pickup, move around and drop widgets, use
* {@link PickupDragController}.
* </p>
"""
# NOTE(review): the block below is a mechanical Java -> Python translation of
# gwt-dnd's AbstractDragController (pyjamas port) and is NOT valid Python as
# it stands: "implements", "throws", "instanceof", Java casts, Java-style
# for-loops and duplicate method names all survive from the Java source.
# Code is kept byte-for-byte; the comments flag the leftovers to resolve.
class AbstractDragController implements DragController:
"""*
* @deprecated Instead selectively use your own CSS classes.
"""
self.CSS_DRAGGABLE
"""*
* @deprecated Instead selectively use your own CSS classes.
"""
self.CSS_DRAGGING
"""*
* @deprecated Instead selectively use your own CSS classes.
"""
self.CSS_HANDLE
self.CSS_SELECTED = "dragdrop-selected"
# NOTE(review): HashMap is a Java type; a Python dict is intended here.
dragHandles = HashMap()
self.PRIVATE_CSS_DRAGGABLE = "dragdrop-draggable"
self.PRIVATE_CSS_DRAGGING = "dragdrop-dragging"
self.PRIVATE_CSS_HANDLE = "dragdrop-handle"
# NOTE(review): two setVersion definitions follow; in Python the second
# would shadow the first (translation artifact of a Java static block).
def setVersion(self):
self.CSS_DRAGGABLE = PRIVATE_CSS_DRAGGABLE
self.CSS_DRAGGING = PRIVATE_CSS_DRAGGING
self.CSS_HANDLE = PRIVATE_CSS_HANDLE
def setVersion(self):
JS("""
$GWT_DND_VERSION = "2.0.7";
""")
# Bare attribute names below are untranslated Java field declarations.
self.context
self.boundaryPanel
self.constrainedToBoundaryPanel=False
self.dragEndEvent
self.dragHandlers
self.dragStartEvent
self.dragStartPixels=0
self.mouseDragHandler
self.multipleSelectionAllowed = False
"""*
* Create a drag-and-drop controller. Drag operations will be limited to
* the specified boundary panel.
*
* @param boundaryPanel the desired boundary panel or <code>RootPanel.get()</code>
* if entire document body is to be the boundary
"""
def __init__(self, boundaryPanel):
# NOTE(review): Java "assert cond : msg" syntax and "this" survive below.
assert boundaryPanel is not None : "Use 'RootPanel.get()' instead of 'None'."
self.boundaryPanel = boundaryPanel
context = DragContext(this)
mouseDragHandler = MouseDragHandler(context)
def addDragHandler(self, handler):
if dragHandlers is None:
dragHandlers = DragHandlerCollection()
dragHandlers.add(handler)
def clearSelection(self):
# NOTE(review): untranslated Java iterator for-loop.
for Iterator iterator = context.selectedWidgets.iterator(); iterator.hasNext():
widget = (Widget) iterator.next()
widget.removeStyleName(CSS_SELECTED)
iterator.remove()
def dragEnd(self):
context.draggable.removeStyleName(PRIVATE_CSS_DRAGGING)
if dragHandlers is not None:
dragHandlers.fireDragEnd(dragEndEvent)
dragEndEvent = None
assert dragEndEvent is None
# NOTE(review): Java overloads collapse to duplicate names in Python.
def dragEnd(self, draggable, dropTarget):
raise UnsupportedOperationException()
def dragStart(self):
resetCache()
if dragHandlers is not None:
dragHandlers.fireDragStart(dragStartEvent)
dragStartEvent = None
context.draggable.addStyleName(PRIVATE_CSS_DRAGGING)
assert dragStartEvent is None
def dragStart(self, draggable):
raise UnsupportedOperationException()
def getBehaviorConstrainedToBoundaryPanel(self):
return constrainedToBoundaryPanel
def getBehaviorDragStartSensitivity(self):
return dragStartPixels
def getBehaviorMultipleSelection(self):
return multipleSelectionAllowed
def getBoundaryPanel(self):
return boundaryPanel
def getDropControllerCollection(self):
raise UnsupportedOperationException()
def getIntersectDropController(self, widget):
raise UnsupportedOperationException()
def getIntersectDropController(self, widget, x, y):
raise UnsupportedOperationException()
def getMovableWidget(self):
raise UnsupportedOperationException()
"""*
* Attaches a {@link MouseDragHandler} (which is a
* {@link com.google.gwt.user.client.ui.MouseListener}) to the widget,
* applies the {@link #PRIVATE_CSS_DRAGGABLE} style to the draggable, applies the
* {@link #PRIVATE_CSS_HANDLE} style to the handle.
*
* @see #makeDraggable(Widget, Widget)
* @see HasDragHandle
*
* @param draggable the widget to be made draggable
"""
def makeDraggable(self, draggable):
# NOTE(review): "instanceof" and the Java cast below are untranslated.
if draggable instanceof HasDragHandle:
makeDraggable(draggable, ((HasDragHandle) draggable).getDragHandle())
else:
makeDraggable(draggable, draggable)
"""*
* Similar to {@link #makeDraggable(Widget)}, but allow separate, child to be
* specified as the drag handle by which the first widget can be dragged.
*
* @param draggable the widget to be made draggable
* @param dragHandle the widget by which widget can be dragged
"""
def makeDraggable(self, draggable, dragHandle):
mouseDragHandler.makeDraggable(draggable, dragHandle)
draggable.addStyleName(PRIVATE_CSS_DRAGGABLE)
dragHandle.addStyleName(PRIVATE_CSS_HANDLE)
dragHandles.put(draggable, dragHandle)
"""*
* Performs the reverse of {@link #makeDraggable(Widget)}, detaching the
* {@link MouseDragHandler} from the widget and removing any styling which was
* applied when making the widget draggable.
*
* @param draggable the widget to no longer be draggable
"""
def makeNotDraggable(self, draggable):
# NOTE(review): Java typed declaration and cast below are untranslated.
Widget dragHandle = (Widget) dragHandles.remove(draggable)
mouseDragHandler.makeNotDraggable(dragHandle)
draggable.removeStyleName(PRIVATE_CSS_DRAGGABLE)
dragHandle.removeStyleName(PRIVATE_CSS_HANDLE)
def notifyDragEnd(self, dragEndEvent):
raise UnsupportedOperationException()
# NOTE(review): Java signatures with "throws" clauses and typed parameters
# below were never converted to Python syntax.
def previewDragEnd() throws VetoDragException {
assert dragEndEvent is None
if dragHandlers is not None:
dragEndEvent = DragEndEvent(context)
dragHandlers.firePreviewDragEnd(dragEndEvent)
def previewDragEnd(Widget draggable, Widget dropTarget) throws VetoDragException {
raise UnsupportedOperationException()
def previewDragStart() throws VetoDragException {
assert dragStartEvent is None
if dragHandlers is not None:
dragStartEvent = DragStartEvent(context)
dragHandlers.firePreviewDragStart(dragStartEvent)
def previewDragStart(Widget draggable) throws VetoDragException {
raise UnsupportedOperationException()
def removeDragHandler(self, handler):
if dragHandlers is not None:
dragHandlers.remove(handler)
# NOTE(review): empty body -- needs at least "pass" to be valid Python.
def resetCache(self):
def setBehaviorConstrainedToBoundaryPanel(self, constrainedToBoundaryPanel):
self.constrainedToBoundaryPanel = constrainedToBoundaryPanel
def setBehaviorDragStartSensitivity(self, pixels):
assert pixels >= 0
dragStartPixels = pixels
def setBehaviorMultipleSelection(self, multipleSelectionAllowed):
self.multipleSelectionAllowed = multipleSelectionAllowed
# NOTE(review): untranslated Java iterator for-loop.
for Iterator iterator = context.selectedWidgets.iterator(); iterator.hasNext();:
Widget widget = (Widget) iterator.next()
widget.removeStyleName(CSS_SELECTED)
iterator.remove()
def setConstrainWidgetToBoundaryPanel(self, constrainWidgetToBoundaryPanel):
setBehaviorConstrainedToBoundaryPanel(constrainWidgetToBoundaryPanel)
def toggleSelection(self, draggable):
assert draggable is not None
if context.selectedWidgets.remove(draggable):
draggable.removeStyleName(CSS_SELECTED)
elif multipleSelectionAllowed:
context.selectedWidgets.add(draggable)
draggable.addStyleName(CSS_SELECTED)
else:
context.selectedWidgets.clear()
context.selectedWidgets.add(draggable)
"""*
* @deprecated Use {@link PickupDragController#newBoundaryDropController(AbsolutePanel, boolean)} instead.
"""
def newBoundaryDropController(self):
raise UnsupportedOperationException()
"""*
* @deprecated Use {@link PickupDragController#newBoundaryDropController(AbsolutePanel, boolean)} instead.
"""
BoundaryDropController newBoundaryDropController(AbsolutePanel boundaryPanel,
boolean allowDropping) {
raise UnsupportedOperationException()
"""*
* @deprecated Use {@link PickupDragController#restoreSelectedWidgetsLocation()} instead.
"""
def restoreDraggableLocation(self, draggable):
raise UnsupportedOperationException()
"""*
* @deprecated Use {@link PickupDragController#restoreSelectedWidgetsStyle()} instead.
"""
def restoreDraggableStyle(self, draggable):
raise UnsupportedOperationException()
"""*
* @deprecated Use {@link PickupDragController#saveSelectedWidgetsLocationAndStyle()} instead.
"""
def saveDraggableLocationAndStyle(self, draggable):
raise UnsupportedOperationException()
| |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # These placeholder strings are substituted by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so each one
    # must stay on its own line. _version.py will just call get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
# Plain attribute bag; populated by get_config() below.
class VersioneerConfig:
    """Container for Versioneer configuration parameters."""
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These values are filled in when 'setup.py versioneer' creates
    # _version.py.
    settings = {
        "VCS": "git",
        "style": "pep440",
        "tag_prefix": "v",
        "parentdir_prefix": "None",
        "versionfile_source": "niftynet/utilities/versioneer_version.py",
        "verbose": False,
    }
    cfg = VersioneerConfig()
    for attr, value in settings.items():
        setattr(cfg, attr, value)
    return cfg
# Control-flow exception: a strategy signals "try the next one" by raising it.
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""
# Registry of long _version.py templates keyed by VCS name
# (not populated in this chunk of the module -- TODO confirm).
LONG_VERSION_PY = {}
# HANDLERS[vcs][method] -> handler function; filled by @register_vcs_handler.
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method] and return it unchanged."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s), trying each name in *commands* in turn.

    Returns a (stdout, returncode) tuple. If no candidate command could be
    started at all, returns (None, None); if the command ran but exited
    non-zero, returns (None, returncode). With verbose, diagnostics are
    printed on failure.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
                                 stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError as e:
            # IDIOM: was "e = sys.exc_info()[1]" after a plain except --
            # a Python 2.4 relic; "as e" works on every supported version.
            if e.errno == errno.ENOENT:
                # This candidate doesn't exist; try the next one.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried = []
    for _ in range(3):
        tail = os.path.basename(root)
        if tail.startswith(parentdir_prefix):
            # Everything after the prefix is taken as the version string.
            return {"version": tail[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file."""
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # ROBUSTNESS: 'with' guarantees the handle is closed even if
        # parsing raises; the previous open()/close() pair leaked the
        # handle on any exception between the two calls.
        with open(versionfile_abs, "r") as f:
            for line in f.readlines():
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
                if line.strip().startswith("git_date ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["date"] = mo.group(1)
    except EnvironmentError:
        # Missing/unreadable file: return whatever was collected (empty).
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords."""
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {candidate.strip() for candidate in refnames.strip("()").split(",")}
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = {candidate[len(TAG):] for candidate in refs if candidate.startswith(TAG)}
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = {candidate for candidate in refs if re.search(r'\d', candidate)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            version = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % version)
            return {"version": version,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.
    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, error, dirty,
    closest-tag, distance, date. Raises NotThisMethod when `root` is not a
    git checkout or git itself fails.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    # run_command returns (stdout, returncode); non-zero rc here means
    # `root` is not inside a git working tree at all.
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        # Greedy (.+) keeps hyphens inside TAG; NUM/HEX anchor the tail.
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a .

    ``pieces["closest-tag"]`` may be absent *or present with value None*
    (no tag found); ``dict.get`` only substitutes the default for a missing
    key, so the original ``pieces.get("closest-tag", "")`` could hand None
    to ``in`` and raise TypeError. ``or ""`` covers both cases.
    """
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag to anchor on, fall back to the untagged form
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    parts = [tag]
    if pieces["distance"] or pieces["dirty"]:
        # Start (or extend) the local version segment.
        parts.append(plus_or_dot(pieces))
        parts.append("%d.g%s" % (pieces["distance"], pieces["short"]))
        if pieces["dirty"]:
            parts.append(".dirty")
    return "".join(parts)
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return "%s.post.dev%d" % (tag, pieces["distance"])
    return tag
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    distance = pieces["distance"]
    dirty = pieces["dirty"]
    if tag:
        if not (distance or dirty):
            # Exactly on a clean tag: the tag is the version.
            return tag
        version = tag + ".post%d" % distance
        if dirty:
            version += ".dev0"
        return version + plus_or_dot(pieces) + "g%s" % pieces["short"]
    # exception #1: no tag anywhere in history
    version = "0.post%d" % distance
    if dirty:
        version += ".dev0"
    return version + "+g%s" % pieces["short"]
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        if not (pieces["distance"] or pieces["dirty"]):
            # Clean build exactly on the tag.
            return tag
        version = tag + ".post%d" % pieces["distance"]
    else:
        # exception #1
        version = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        version += ".dev0"
    return version
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        # Distance suffix only when we have moved past the tag.
        version = tag if not pieces["distance"] else (
            "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"]))
    else:
        # exception #1: bare short hash
        version = pieces["short"]
    return version + "-dirty" if pieces["dirty"] else version
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        # Always include distance and hash, even when distance is 0.
        version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1: bare short hash
        version = pieces["short"]
    return version + "-dirty" if pieces["dirty"] else version
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # Upstream parsing failed; surface the error verbatim.
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}

    if not style or style == "default":
        style = "pep440"  # the default

    # Dispatch table replaces the if/elif ladder; same styles, same order
    # of precedence is irrelevant since keys are distinct.
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    renderer = renderers.get(style)
    if renderer is None:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderer(pieces)

    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    # Strategy 1: expanded git-archive keywords (works from exported
    # tarballs; NotThisMethod means the keywords were never expanded).
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # No __file__ at all (frozen interpreter); nothing more we can do.
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}
    # Strategy 2: ask git directly (works from a source checkout).
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    # Strategy 3: parse the version out of the parent directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    # Every strategy failed; report an explicit placeholder version.
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
| |
# -*- coding: utf-8 -
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
# Copyright 2011 Cloudant, Inc.
import re
import six
import math
import time
import logging
import threading
import bucky2.udpserver as udpserver
log = logging.getLogger(__name__)
class StatsDHandler(threading.Thread):
    """Aggregate StatsD-protocol samples and periodically flush them.

    Samples parsed from UDP payloads accumulate in ``timers``, ``gauges``
    and ``counters`` (guarded by ``lock``). A daemon thread wakes every
    ``flush_time`` seconds and emits the aggregated statistics onto
    ``queue`` as ``(host, name, value, timestamp)`` tuples (host is always
    ``None`` for statsd metrics).
    """
    def __init__(self, queue, flush_time=10):
        super(StatsDHandler, self).__init__()
        self.daemon = True
        self.queue = queue
        self.lock = threading.Lock()
        self.timers = {}
        self.gauges = {}
        self.counters = {}
        self.flush_time = flush_time
        # Metric-name sanitization: whitespace -> "_", "/" -> "-", then
        # drop anything outside [a-zA-Z_\-0-9.]. Raw strings fix the
        # invalid-escape-sequence warnings the old '"\s+"' literals raise
        # on modern Pythons.
        self.key_res = (
            (re.compile(r"\s+"), "_"),
            (re.compile(r"/"), "-"),
            (re.compile(r"[^a-zA-Z_\-0-9.]"), "")
        )

    def run(self):
        """Flush loop: every flush_time seconds enqueue all aggregates."""
        while True:
            time.sleep(self.flush_time)
            stime = int(time.time())
            with self.lock:
                num_stats = self.enqueue_timers(stime)
                num_stats += self.enqueue_counters(stime)
                num_stats += self.enqueue_gauges(stime)
                self.enqueue("stats.numStats", num_stats, stime)

    def enqueue(self, name, stat, stime):
        # No hostnames on statsd
        self.queue.put((None, name, stat, stime))

    def enqueue_timers(self, stime):
        """Emit mean/upper/upper_<pct>/lower/count for each timer."""
        ret = 0
        # list(...) works identically on py2 and py3 and tolerates the
        # value reassignment below; replaces the six.PY3 conditional.
        for k, v in list(self.timers.items()):
            # Skip timers that haven't collected any values
            if not v:
                continue
            v.sort()
            pct_thresh = 90
            count = len(v)
            vmin, vmax = v[0], v[-1]
            mean, vthresh = vmin, vmax
            if count > 1:
                # Trim to the percentile threshold before computing the mean.
                thresh_idx = int(math.floor(pct_thresh / 100.0 * count))
                v = v[:thresh_idx]
                vthresh = v[-1]
                vsum = sum(v)
                mean = vsum / float(len(v))
            self.enqueue("stats.timers.%s.mean" % k, mean, stime)
            self.enqueue("stats.timers.%s.upper" % k, vmax, stime)
            t = int(pct_thresh)
            self.enqueue("stats.timers.%s.upper_%s" % (k, t), vthresh, stime)
            self.enqueue("stats.timers.%s.lower" % k, vmin, stime)
            self.enqueue("stats.timers.%s.count" % k, count, stime)
            self.timers[k] = []
            ret += 1
        return ret

    def enqueue_gauges(self, stime):
        ret = 0
        for k, v in list(self.gauges.items()):
            self.enqueue("stats.gauges.%s" % k, v, stime)
            ret += 1
        return ret

    def enqueue_counters(self, stime):
        ret = 0
        for k, v in list(self.counters.items()):
            # Emit both the per-second rate and the raw count, then reset.
            self.enqueue("stats.%s" % k, v / self.flush_time, stime)
            self.enqueue("stats_counts.%s" % k, v, stime)
            self.counters[k] = 0
            ret += 1
        return ret

    def handle(self, data):
        # Adding a bit of extra sauce so clients can
        # send multiple samples in a single UDP
        # packet.
        for line in data.splitlines():
            self.line = line  # kept for bad_line() error reporting
            if not line.strip():
                continue
            self.handle_line(line)

    def handle_line(self, line):
        """Parse one 'key:value|type[|@rate]' sample line."""
        bits = line.split(":")
        key = self.handle_key(bits.pop(0))
        if not bits:
            self.bad_line()
            return
        # I'm not sure if statsd is doing this on purpose
        # but the code allows for name:v1|t1:v2|t2 etc etc.
        # In the interest of compatibility, I'll maintain
        # the behavior.
        for sample in bits:
            if "|" not in sample:
                self.bad_line()
                continue
            fields = sample.split("|")
            if fields[1] == "ms":
                self.handle_timer(key, fields)
            elif fields[1] == "g":
                self.handle_gauge(key, fields)
            else:
                self.handle_counter(key, fields)

    def handle_key(self, key):
        """Sanitize a metric name via the key_res substitutions."""
        for (rexp, repl) in self.key_res:
            key = rexp.sub(repl, key)
        return key

    def handle_timer(self, key, fields):
        try:
            val = float(fields[0] or 0)
        except (TypeError, ValueError):
            # Narrowed from bare except: only conversion errors expected.
            self.bad_line()
            return
        with self.lock:
            self.timers.setdefault(key, []).append(val)

    def handle_gauge(self, key, fields):
        valstr = fields[0] or "0"
        try:
            val = float(valstr)
        except (TypeError, ValueError):
            self.bad_line()
            return
        # A leading +/- means "adjust the current value" rather than set it.
        delta = valstr[0] in ["+", "-"]
        with self.lock:
            if delta and key in self.gauges:
                self.gauges[key] = self.gauges[key] + val
            else:
                self.gauges[key] = val

    def handle_counter(self, key, fields):
        rate = 1.0
        if len(fields) > 2 and fields[2][:1] == "@":
            try:
                rate = float(fields[2][1:].strip())
            except ValueError:
                rate = 1.0
        try:
            # Scale the sampled value back up by the sample rate; a zero
            # rate raises ZeroDivisionError and is treated as a bad line.
            val = int(float(fields[0] or 0) / rate)
        except (TypeError, ValueError, ZeroDivisionError):
            self.bad_line()
            return
        with self.lock:
            if key not in self.counters:
                self.counters[key] = 0
            self.counters[key] += val

    def bad_line(self):
        log.error("StatsD: Invalid line: '%s'", self.line.strip())
class StatsDServer(udpserver.UDPServer):
    """UDP server that feeds received StatsD packets to a StatsDHandler."""
    def __init__(self, queue, cfg):
        super(StatsDServer, self).__init__(cfg.statsd_ip, cfg.statsd_port)
        self.handler = StatsDHandler(queue, flush_time=cfg.statsd_flush_time)
    def run(self):
        # Start the aggregation/flush thread before entering the UDP loop.
        self.handler.start()
        super(StatsDServer, self).run()
    # The handle() signature is picked at import time depending on the
    # interpreter: py3 datagrams arrive as bytes and must be decoded.
    if six.PY3:
        def handle(self, data, addr):
            self.handler.handle(data.decode())
            # Returning False tells the UDP loop to exit when the flush
            # thread has died.
            if not self.handler.is_alive():
                return False
            return True
    else:
        def handle(self, data, addr):
            self.handler.handle(data)
            if not self.handler.is_alive():
                return False
            return True
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Alessandro Pilotti, Cloudbase Solutions Srl
from sqlalchemy.orm import exc
from neutron.common import exceptions as q_exc
import neutron.db.api as db_api
from neutron.db import models_v2
from neutron.openstack.common import log as logging
from neutron.plugins.hyperv.common import constants
from neutron.plugins.hyperv import model as hyperv_model
LOG = logging.getLogger(__name__)
class HyperVPluginDB(object):
    """DB access layer for the Hyper-V Neutron plugin.

    Manages the VLAN allocation pool (``VlanAllocation`` rows), network
    bindings and port status on top of the Neutron database API.
    """
    def initialize(self):
        # Set up the DB engine/session factory before any other call.
        db_api.configure_db()
    def reserve_vlan(self, session):
        """Reserve any unallocated VLAN; raise NoNetworkAvailable if none."""
        with session.begin(subtransactions=True):
            alloc_q = session.query(hyperv_model.VlanAllocation)
            alloc_q = alloc_q.filter_by(allocated=False)
            alloc = alloc_q.first()
            if alloc:
                LOG.debug(_("Reserving vlan %(vlan_id)s on physical network "
                            "%(physical_network)s from pool"),
                          {'vlan_id': alloc.vlan_id,
                           'physical_network': alloc.physical_network})
                alloc.allocated = True
                return (alloc.physical_network, alloc.vlan_id)
            raise q_exc.NoNetworkAvailable()
    def reserve_flat_net(self, session):
        """Reserve a flat network (the FLAT_VLAN_ID sentinel allocation)."""
        with session.begin(subtransactions=True):
            alloc_q = session.query(hyperv_model.VlanAllocation)
            alloc_q = alloc_q.filter_by(allocated=False,
                                        vlan_id=constants.FLAT_VLAN_ID)
            alloc = alloc_q.first()
            if alloc:
                LOG.debug(_("Reserving flat physical network "
                            "%(physical_network)s from pool"),
                          {'physical_network': alloc.physical_network})
                alloc.allocated = True
                return alloc.physical_network
            raise q_exc.NoNetworkAvailable()
    def reserve_specific_vlan(self, session, physical_network, vlan_id):
        """Reserve one exact (physical_network, vlan_id) allocation.

        Raises FlatNetworkInUse/VlanIdInUse when already allocated and
        NoNetworkAvailable when no such allocation row exists.
        """
        with session.begin(subtransactions=True):
            try:
                alloc_q = session.query(hyperv_model.VlanAllocation)
                alloc_q = alloc_q.filter_by(
                    physical_network=physical_network,
                    vlan_id=vlan_id)
                alloc = alloc_q.one()
                if alloc.allocated:
                    if vlan_id == constants.FLAT_VLAN_ID:
                        raise q_exc.FlatNetworkInUse(
                            physical_network=physical_network)
                    else:
                        raise q_exc.VlanIdInUse(
                            vlan_id=vlan_id,
                            physical_network=physical_network)
                LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical "
                            "network %(physical_network)s from pool"),
                          {'vlan_id': vlan_id,
                           'physical_network': physical_network})
                alloc.allocated = True
            except exc.NoResultFound:
                raise q_exc.NoNetworkAvailable()
    def reserve_specific_flat_net(self, session, physical_network):
        """Reserve the flat-network allocation for a physical network."""
        return self.reserve_specific_vlan(session, physical_network,
                                          constants.FLAT_VLAN_ID)
    def add_network_binding(self, session, network_id, network_type,
                            physical_network, segmentation_id):
        """Persist the binding of a network to its segmentation details."""
        with session.begin(subtransactions=True):
            binding = hyperv_model.NetworkBinding(
                network_id, network_type,
                physical_network,
                segmentation_id)
            session.add(binding)
    def get_port(self, port_id):
        """Return the port row for port_id, or None when not found."""
        session = db_api.get_session()
        try:
            port = session.query(models_v2.Port).filter_by(id=port_id).one()
        except exc.NoResultFound:
            port = None
        return port
    def get_network_binding(self, session, network_id):
        """Return the NetworkBinding for network_id, or None when missing."""
        session = session or db_api.get_session()
        try:
            binding_q = session.query(hyperv_model.NetworkBinding)
            binding_q = binding_q.filter_by(network_id=network_id)
            return binding_q.one()
        except exc.NoResultFound:
            return
    def set_port_status(self, port_id, status):
        """Update a port's status; raise PortNotFound if it doesn't exist."""
        session = db_api.get_session()
        try:
            port = session.query(models_v2.Port).filter_by(id=port_id).one()
            port['status'] = status
            session.merge(port)
            session.flush()
        except exc.NoResultFound:
            raise q_exc.PortNotFound(port_id=port_id)
    def release_vlan(self, session, physical_network, vlan_id):
        """Mark an allocation as free again; log (don't raise) if missing."""
        with session.begin(subtransactions=True):
            try:
                alloc_q = session.query(hyperv_model.VlanAllocation)
                alloc_q = alloc_q.filter_by(physical_network=physical_network,
                                            vlan_id=vlan_id)
                alloc = alloc_q.one()
                # The row is kept (not deleted) so the VLAN returns to
                # the allocatable pool.
                alloc.allocated = False
                #session.delete(alloc)
                LOG.debug(_("Releasing vlan %(vlan_id)s on physical network "
                            "%(physical_network)s"),
                          {'vlan_id': vlan_id,
                           'physical_network': physical_network})
            except exc.NoResultFound:
                LOG.warning(_("vlan_id %(vlan_id)s on physical network "
                              "%(physical_network)s not found"),
                            {'vlan_id': vlan_id,
                             'physical_network': physical_network})
    def _add_missing_allocatable_vlans(self, session, vlan_ids,
                                       physical_network):
        # Insert one unallocated row per configured VLAN id still missing.
        for vlan_id in sorted(vlan_ids):
            alloc = hyperv_model.VlanAllocation(
                physical_network, vlan_id)
            session.add(alloc)
    def _remove_non_allocatable_vlans(self, session,
                                      physical_network,
                                      vlan_ids,
                                      allocations):
        # Reconcile existing rows of one physical network against the
        # configured vlan_ids set (which is mutated: matched ids removed).
        if physical_network in allocations:
            for alloc in allocations[physical_network]:
                try:
                    # see if vlan is allocatable
                    vlan_ids.remove(alloc.vlan_id)
                except KeyError:
                    # it's not allocatable, so check if its allocated
                    if not alloc.allocated:
                        # it's not, so remove it from table
                        LOG.debug(_(
                            "Removing vlan %(vlan_id)s on "
                            "physical network "
                            "%(physical_network)s from pool"),
                            {'vlan_id': alloc.vlan_id,
                             'physical_network': physical_network})
                        session.delete(alloc)
            del allocations[physical_network]
    def _remove_unconfigured_vlans(self, session, allocations):
        # Drop unallocated rows of physical networks no longer configured.
        # NOTE(review): itervalues()/xrange below are Python 2 only; this
        # module targets a py2 runtime.
        for allocs in allocations.itervalues():
            for alloc in allocs:
                if not alloc.allocated:
                    LOG.debug(_("Removing vlan %(vlan_id)s on physical "
                                "network %(physical_network)s from pool"),
                              {'vlan_id': alloc.vlan_id,
                               'physical_network': alloc.physical_network})
                    session.delete(alloc)
    def sync_vlan_allocations(self, network_vlan_ranges):
        """Synchronize vlan_allocations table with configured VLAN ranges."""
        session = db_api.get_session()
        with session.begin():
            # get existing allocations for all physical networks
            allocations = dict()
            allocs_q = session.query(hyperv_model.VlanAllocation)
            for alloc in allocs_q:
                allocations.setdefault(alloc.physical_network,
                                       set()).add(alloc)
            # process vlan ranges for each configured physical network
            for physical_network, vlan_ranges in network_vlan_ranges.items():
                # determine current configured allocatable vlans for this
                # physical network
                vlan_ids = set()
                for vlan_range in vlan_ranges:
                    vlan_ids |= set(xrange(vlan_range[0], vlan_range[1] + 1))
                # remove from table unallocated vlans not currently allocatable
                self._remove_non_allocatable_vlans(session,
                                                   physical_network,
                                                   vlan_ids,
                                                   allocations)
                # add missing allocatable vlans to table
                self._add_missing_allocatable_vlans(session, vlan_ids,
                                                    physical_network)
            # remove from table unallocated vlans for any unconfigured physical
            # networks
            self._remove_unconfigured_vlans(session, allocations)
| |
#
# Copyright 2013 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fnmatch
import os
import jsonpath_rw
from oslo_config import cfg
from oslo_utils import timeutils
import six
import yaml
from ceilometer.event.storage import models
from ceilometer.i18n import _
from ceilometer.openstack.common import log
# Options registered under the [event] group; read via cfg.CONF.event.*
OPTS = [
    cfg.StrOpt('definitions_cfg_file',
               default="event_definitions.yaml",
               help="Configuration file for event definitions."
               ),
    cfg.BoolOpt('drop_unmatched_notifications',
                default=False,
                help='Drop notifications if no event definition matches. '
                '(Otherwise, we convert them with just the default traits)'),
    cfg.MultiStrOpt('store_raw',
                    default=[],
                    help='Store the raw notification for select priority '
                    'levels (info and/or error). By default, raw details are '
                    'not captured.')
]
cfg.CONF.register_opts(OPTS, group='event')
LOG = log.getLogger(__name__)
class EventDefinitionException(Exception):
    """Raised when an event/trait definition in the config is malformed."""

    def __init__(self, message, definition_cfg):
        super(EventDefinitionException, self).__init__(message)
        self.definition_cfg = definition_cfg
        # BaseException.message was removed in Python 3, so __str__ would
        # raise AttributeError there; store the message explicitly.
        self.message = message

    def __str__(self):
        return '%s %s: %s' % (self.__class__.__name__,
                              self.definition_cfg, self.message)
class TraitDefinition(object):
    """Extracts one trait value from a notification body.

    A trait is configured by a name, a JSONPath 'fields' specification,
    an optional type (default 'text') and an optional extraction plugin.
    """
    def __init__(self, name, trait_cfg, plugin_manager):
        self.cfg = trait_cfg
        self.name = name
        type_name = trait_cfg.get('type', 'text')
        if 'plugin' in trait_cfg:
            plugin_cfg = trait_cfg['plugin']
            if isinstance(plugin_cfg, six.string_types):
                # Shorthand form: a bare plugin name, no parameters.
                plugin_name = plugin_cfg
                plugin_params = {}
            else:
                try:
                    plugin_name = plugin_cfg['name']
                except KeyError:
                    raise EventDefinitionException(
                        _('Plugin specified, but no plugin name supplied for '
                          'trait %s') % name, self.cfg)
                plugin_params = plugin_cfg.get('parameters')
                if plugin_params is None:
                    plugin_params = {}
            try:
                plugin_ext = plugin_manager[plugin_name]
            except KeyError:
                raise EventDefinitionException(
                    _('No plugin named %(plugin)s available for '
                      'trait %(trait)s') % dict(plugin=plugin_name,
                                                trait=name), self.cfg)
            plugin_class = plugin_ext.plugin
            self.plugin = plugin_class(**plugin_params)
        else:
            self.plugin = None
        if 'fields' not in trait_cfg:
            raise EventDefinitionException(
                _("Required field in trait definition not specified: "
                  "'%s'") % 'fields',
                self.cfg)
        fields = trait_cfg['fields']
        if not isinstance(fields, six.string_types):
            # NOTE(mdragon): if not a string, we assume a list.
            if len(fields) == 1:
                fields = fields[0]
            else:
                # Multiple paths are OR'ed into one union JSONPath query.
                fields = '|'.join('(%s)' % path for path in fields)
        try:
            self.fields = jsonpath_rw.parse(fields)
        except Exception as e:
            raise EventDefinitionException(
                _("Parse error in JSONPath specification "
                  "'%(jsonpath)s' for %(trait)s: %(err)s")
                % dict(jsonpath=fields, trait=name, err=e), self.cfg)
        self.trait_type = models.Trait.get_type_by_name(type_name)
        if self.trait_type is None:
            raise EventDefinitionException(
                _("Invalid trait type '%(type)s' for trait %(trait)s")
                % dict(type=type_name, trait=name), self.cfg)
    def _get_path(self, match):
        # Recursively yield the dotted path elements of a jsonpath match,
        # root first.
        if match.context is not None:
            for path_element in self._get_path(match.context):
                yield path_element
        yield str(match.path)
    def to_trait(self, notification_body):
        """Return a models.Trait extracted from the body, or None."""
        values = [match for match in self.fields.find(notification_body)
                  if match.value is not None]
        if self.plugin is not None:
            # Plugins receive (path, value) pairs for all matches and
            # compute the final value themselves.
            value_map = [('.'.join(self._get_path(match)), match.value) for
                         match in values]
            value = self.plugin.trait_value(value_map)
        else:
            value = values[0].value if values else None
        if value is None:
            return None
        # NOTE(mdragon): some openstack projects (mostly Nova) emit ''
        # for null fields for things like dates.
        if self.trait_type != models.Trait.TEXT_TYPE and value == '':
            return None
        value = models.Trait.convert_value(self.trait_type, value)
        return models.Trait(self.name, self.trait_type, value)
class EventDefinition(object):
    """One configured event definition: type globs plus trait definitions."""
    # Traits extracted for every event; config 'traits' entries with the
    # same name override these.
    DEFAULT_TRAITS = dict(
        service=dict(type='text', fields='publisher_id'),
        request_id=dict(type='text', fields='_context_request_id'),
        tenant_id=dict(type='text', fields=['payload.tenant_id',
                                            '_context_tenant']),
    )
    def __init__(self, definition_cfg, trait_plugin_mgr):
        self._included_types = []
        self._excluded_types = []
        self.traits = dict()
        self.cfg = definition_cfg
        # Notification priorities whose full body is kept as 'raw'.
        self.raw_levels = [level.lower() for level in cfg.CONF.event.store_raw]
        try:
            event_type = definition_cfg['event_type']
            traits = definition_cfg['traits']
        except KeyError as err:
            raise EventDefinitionException(
                _("Required field %s not specified") % err.args[0], self.cfg)
        if isinstance(event_type, six.string_types):
            event_type = [event_type]
        # '!type' entries are exclusions; everything else is an inclusion.
        for t in event_type:
            if t.startswith('!'):
                self._excluded_types.append(t[1:])
            else:
                self._included_types.append(t)
        if self._excluded_types and not self._included_types:
            # Exclusions only: match everything that isn't excluded.
            self._included_types.append('*')
        for trait_name in self.DEFAULT_TRAITS:
            self.traits[trait_name] = TraitDefinition(
                trait_name,
                self.DEFAULT_TRAITS[trait_name],
                trait_plugin_mgr)
        for trait_name in traits:
            self.traits[trait_name] = TraitDefinition(
                trait_name,
                traits[trait_name],
                trait_plugin_mgr)
    def included_type(self, event_type):
        """True if event_type matches any inclusion glob."""
        for t in self._included_types:
            if fnmatch.fnmatch(event_type, t):
                return True
        return False
    def excluded_type(self, event_type):
        """True if event_type matches any exclusion glob."""
        for t in self._excluded_types:
            if fnmatch.fnmatch(event_type, t):
                return True
        return False
    def match_type(self, event_type):
        """True if this definition handles the given event_type."""
        return (self.included_type(event_type)
                and not self.excluded_type(event_type))
    @property
    def is_catchall(self):
        # A bare '*' with no exclusions matches every notification.
        return '*' in self._included_types and not self._excluded_types
    @staticmethod
    def _extract_when(body):
        """Extract the generated datetime from the notification."""
        # NOTE: I am keeping the logic the same as it was in the collector,
        # However, *ALL* notifications should have a 'timestamp' field, it's
        # part of the notification envelope spec. If this was put here because
        # some openstack project is generating notifications without a
        # timestamp, then that needs to be filed as a bug with the offending
        # project (mdragon)
        when = body.get('timestamp', body.get('_context_timestamp'))
        if when:
            return timeutils.normalize_time(timeutils.parse_isotime(when))
        return timeutils.utcnow()
    def to_event(self, notification_body):
        """Convert a notification dict into a models.Event."""
        event_type = notification_body['event_type']
        message_id = notification_body['message_id']
        when = self._extract_when(notification_body)
        traits = (self.traits[t].to_trait(notification_body)
                  for t in self.traits)
        # Only accept non-None value traits ...
        traits = [trait for trait in traits if trait is not None]
        raw = (notification_body
               if notification_body.get('priority') in self.raw_levels else {})
        event = models.Event(message_id, event_type, when, traits, raw)
        return event
class NotificationEventsConverter(object):
    """Notification Event Converter
    The NotificationEventsConverter handles the conversion of Notifications
    from openstack systems into Ceilometer Events.
    The conversion is handled according to event definitions in a config file.
    The config is a list of event definitions. Order is significant, a
    notification will be processed according to the LAST definition that
    matches it's event_type. (We use the last matching definition because that
    allows you to use YAML merge syntax in the definitions file.)
    Each definition is a dictionary with the following keys (all are
    required):
    - event_type: this is a list of notification event_types this definition
      will handle. These can be wildcarded with unix shell glob (not regex!)
      wildcards.
      An exclusion listing (starting with a '!') will exclude any types listed
      from matching. If ONLY exclusions are listed, the definition will match
      anything not matching the exclusions.
      This item can also be a string, which will be taken as equivalent to 1
      item list.
      Examples:
      * ['compute.instance.exists'] will only match
        compute.intance.exists notifications
      * "compute.instance.exists" Same as above.
      * ["image.create", "image.delete"] will match
        image.create and image.delete, but not anything else.
      * "compute.instance.*" will match
        compute.instance.create.start but not image.upload
      * ['*.start','*.end', '!scheduler.*'] will match
        compute.instance.create.start, and image.delete.end,
        but NOT compute.instance.exists or
        scheduler.run_instance.start
      * '!image.*' matches any notification except image
        notifications.
      * ['*', '!image.*'] same as above.
    - traits: (dict) The keys are trait names, the values are the trait
      definitions. Each trait definition is a dictionary with the following
      keys:
      - type (optional): The data type for this trait. (as a string)
        Valid options are: 'text', 'int', 'float' and 'datetime', defaults to
        'text' if not specified.
      - fields: a path specification for the field(s) in the notification you
        wish to extract. The paths can be specified with a dot syntax
        (e.g. 'payload.host') or dictionary syntax (e.g. 'payload[host]') is
        also supported.
        In either case, if the key for the field you are looking for contains
        special characters, like '.', it will need to be quoted (with double
        or single quotes) like so::
         "payload.image_meta.'org.openstack__1__architecture'"
        The syntax used for the field specification is a variant of JSONPath,
        and is fairly flexible.
        (see: https://github.com/kennknowles/python-jsonpath-rw for more info)
        Specifications can be written to match multiple possible fields, the
        value for the trait will be derived from the matching fields that
        exist and have a non-null (i.e. is not None) values in the
        notification.
        By default the value will be the first such field. (plugins can alter
        that, if they wish)
        This configuration value is normally a string, for convenience, it can
        be specified as a list of specifications, which will be OR'ed together
        (a union query in jsonpath terms)
      - plugin (optional): (dictionary) with the following keys:
        - name: (string) name of a plugin to load
        - parameters: (optional) Dictionary of keyword args to pass
          to the plugin on initialization. See documentation on each plugin to
          see what arguments it accepts.
        For convenience, this value can also be specified as a string, which is
        interpreted as a plugin name, which will be loaded with no parameters.
    """
    def __init__(self, events_config, trait_plugin_mgr, add_catchall=True):
        # Definitions are stored reversed so that to_event()'s first match
        # corresponds to the LAST matching definition in the config file.
        self.definitions = [
            EventDefinition(event_def, trait_plugin_mgr)
            for event_def in reversed(events_config)]
        if add_catchall and not any(d.is_catchall for d in self.definitions):
            # Append (i.e. lowest priority) a default-traits-only fallback.
            event_def = dict(event_type='*', traits={})
            self.definitions.append(EventDefinition(event_def,
                                                    trait_plugin_mgr))
    def to_event(self, notification_body):
        """Convert one notification to an Event; None when dropped."""
        event_type = notification_body['event_type']
        message_id = notification_body['message_id']
        edef = None
        for d in self.definitions:
            if d.match_type(event_type):
                edef = d
                break
        if edef is None:
            msg = (_('Dropping Notification %(type)s (uuid:%(msgid)s)')
                   % dict(type=event_type, msgid=message_id))
            if cfg.CONF.event.drop_unmatched_notifications:
                LOG.debug(msg)
            else:
                # If drop_unmatched_notifications is False, this should
                # never happen. (mdragon)
                LOG.error(msg)
            return None
        return edef.to_event(notification_body)
def get_config_file():
    """Locate the event definitions file: as given, else via oslo search."""
    path = cfg.CONF.event.definitions_cfg_file
    if os.path.exists(path):
        return path
    # Not found directly; let oslo.config search its standard directories
    # (may return None).
    return cfg.CONF.find_file(path)
def setup_events(trait_plugin_mgr):
    """Setup the event definitions from yaml config file."""
    config_file = get_config_file()
    if config_file is None:
        LOG.debug(_("No Event Definitions configuration file found!"
                    " Using default config."))
        events_config = []
    else:
        LOG.debug(_("Event Definitions configuration file: %s"), config_file)
        with open(config_file) as cf:
            raw_yaml = cf.read()
        try:
            events_config = yaml.safe_load(raw_yaml)
        except yaml.YAMLError as err:
            # Report the precise location when the parser supplies one.
            if hasattr(err, 'problem_mark'):
                mark = err.problem_mark
                errmsg = (_("Invalid YAML syntax in Event Definitions file "
                            "%(file)s at line: %(line)s, column: %(column)s.")
                          % dict(file=config_file,
                                 line=mark.line + 1,
                                 column=mark.column + 1))
            else:
                errmsg = (_("YAML error reading Event Definitions file "
                            "%(file)s")
                          % dict(file=config_file))
            LOG.error(errmsg)
            raise
    LOG.info(_("Event Definitions: %s"), events_config)
    allow_drop = cfg.CONF.event.drop_unmatched_notifications
    # When dropping is allowed, no catchall is added so unmatched
    # notifications fall through and get discarded.
    return NotificationEventsConverter(events_config,
                                       trait_plugin_mgr,
                                       add_catchall=not allow_drop)
| |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class vpnclientlessaccessprofile(base_resource) :
    """ Configuration for Clientless VPN rewrite profile resource.

    Auto-generated NITRO resource wrapper: attribute names mirror the
    NITRO API field names, and the base_resource machinery serializes
    instances by introspecting these attributes.
    """
    def __init__(self) :
        # NITRO fields; all are sent/received verbatim by the payload
        # formatter, so names must match the API exactly.
        self._profilename = ""
        self._urlrewritepolicylabel = ""
        self._javascriptrewritepolicylabel = ""
        self._reqhdrrewritepolicylabel = ""
        self._reshdrrewritepolicylabel = ""
        self._regexforfindingurlinjavascript = ""
        self._regexforfindingurlincss = ""
        self._regexforfindingurlinxcomponent = ""
        self._regexforfindingurlinxml = ""
        self._regexforfindingcustomurls = ""
        self._clientconsumedcookies = ""
        self._requirepersistentcookie = ""
        self._cssrewritepolicylabel = ""
        self._xmlrewritepolicylabel = ""
        self._xcomponentrewritepolicylabel = ""
        self._isdefault = False
        self._description = ""
        self._builtin = []
        # Server-side resource count (see count()/count_filtered()).
        self.___count = 0

    # --- writable properties -------------------------------------------
    @property
    def profilename(self) :
        """Name for the NetScaler Gateway clientless access profile. Must begin with an ASCII alphabetic or underscore (_) character, and must consist only of ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Cannot be changed after the profile is created.
        The following requirement applies only to the NetScaler CLI:
        If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my profile" or 'my profile').<br/>Minimum length = 1.
        """
        try :
            return self._profilename
        except Exception as e:
            raise e

    @profilename.setter
    def profilename(self, profilename) :
        """Name for the NetScaler Gateway clientless access profile. Must begin with an ASCII alphabetic or underscore (_) character, and must consist only of ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Cannot be changed after the profile is created.
        The following requirement applies only to the NetScaler CLI:
        If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my profile" or 'my profile').<br/>Minimum length = 1
        """
        try :
            self._profilename = profilename
        except Exception as e:
            raise e

    @property
    def urlrewritepolicylabel(self) :
        """Name of the configured URL rewrite policy label. If you do not specify a policy label name, then URLs are not rewritten.<br/>Minimum length = 1.
        """
        try :
            return self._urlrewritepolicylabel
        except Exception as e:
            raise e

    @urlrewritepolicylabel.setter
    def urlrewritepolicylabel(self, urlrewritepolicylabel) :
        """Name of the configured URL rewrite policy label. If you do not specify a policy label name, then URLs are not rewritten.<br/>Minimum length = 1
        """
        try :
            self._urlrewritepolicylabel = urlrewritepolicylabel
        except Exception as e:
            raise e

    @property
    def javascriptrewritepolicylabel(self) :
        """Name of the configured JavaScript rewrite policy label. If you do not specify a policy label name, then JAVA scripts are not rewritten.<br/>Minimum length = 1.
        """
        try :
            return self._javascriptrewritepolicylabel
        except Exception as e:
            raise e

    @javascriptrewritepolicylabel.setter
    def javascriptrewritepolicylabel(self, javascriptrewritepolicylabel) :
        """Name of the configured JavaScript rewrite policy label. If you do not specify a policy label name, then JAVA scripts are not rewritten.<br/>Minimum length = 1
        """
        try :
            self._javascriptrewritepolicylabel = javascriptrewritepolicylabel
        except Exception as e:
            raise e

    @property
    def reqhdrrewritepolicylabel(self) :
        """Name of the configured Request rewrite policy label. If you do not specify a policy label name, then requests are not rewritten.<br/>Minimum length = 1.
        """
        try :
            return self._reqhdrrewritepolicylabel
        except Exception as e:
            raise e

    @reqhdrrewritepolicylabel.setter
    def reqhdrrewritepolicylabel(self, reqhdrrewritepolicylabel) :
        """Name of the configured Request rewrite policy label. If you do not specify a policy label name, then requests are not rewritten.<br/>Minimum length = 1
        """
        try :
            self._reqhdrrewritepolicylabel = reqhdrrewritepolicylabel
        except Exception as e:
            raise e

    @property
    def reshdrrewritepolicylabel(self) :
        """Name of the configured Response rewrite policy label.<br/>Minimum length = 1.
        """
        try :
            return self._reshdrrewritepolicylabel
        except Exception as e:
            raise e

    @reshdrrewritepolicylabel.setter
    def reshdrrewritepolicylabel(self, reshdrrewritepolicylabel) :
        """Name of the configured Response rewrite policy label.<br/>Minimum length = 1
        """
        try :
            self._reshdrrewritepolicylabel = reshdrrewritepolicylabel
        except Exception as e:
            raise e

    @property
    def regexforfindingurlinjavascript(self) :
        """Name of the pattern set that contains the regular expressions, which match the URL in Java script.<br/>Minimum length = 1.
        """
        try :
            return self._regexforfindingurlinjavascript
        except Exception as e:
            raise e

    @regexforfindingurlinjavascript.setter
    def regexforfindingurlinjavascript(self, regexforfindingurlinjavascript) :
        """Name of the pattern set that contains the regular expressions, which match the URL in Java script.<br/>Minimum length = 1
        """
        try :
            self._regexforfindingurlinjavascript = regexforfindingurlinjavascript
        except Exception as e:
            raise e

    @property
    def regexforfindingurlincss(self) :
        """Name of the pattern set that contains the regular expressions, which match the URL in the CSS.<br/>Minimum length = 1.
        """
        try :
            return self._regexforfindingurlincss
        except Exception as e:
            raise e

    @regexforfindingurlincss.setter
    def regexforfindingurlincss(self, regexforfindingurlincss) :
        """Name of the pattern set that contains the regular expressions, which match the URL in the CSS.<br/>Minimum length = 1
        """
        try :
            self._regexforfindingurlincss = regexforfindingurlincss
        except Exception as e:
            raise e

    @property
    def regexforfindingurlinxcomponent(self) :
        """Name of the pattern set that contains the regular expressions, which match the URL in X Component.<br/>Minimum length = 1.
        """
        try :
            return self._regexforfindingurlinxcomponent
        except Exception as e:
            raise e

    @regexforfindingurlinxcomponent.setter
    def regexforfindingurlinxcomponent(self, regexforfindingurlinxcomponent) :
        """Name of the pattern set that contains the regular expressions, which match the URL in X Component.<br/>Minimum length = 1
        """
        try :
            self._regexforfindingurlinxcomponent = regexforfindingurlinxcomponent
        except Exception as e:
            raise e

    @property
    def regexforfindingurlinxml(self) :
        """Name of the pattern set that contains the regular expressions, which match the URL in XML.<br/>Minimum length = 1.
        """
        try :
            return self._regexforfindingurlinxml
        except Exception as e:
            raise e

    @regexforfindingurlinxml.setter
    def regexforfindingurlinxml(self, regexforfindingurlinxml) :
        """Name of the pattern set that contains the regular expressions, which match the URL in XML.<br/>Minimum length = 1
        """
        try :
            self._regexforfindingurlinxml = regexforfindingurlinxml
        except Exception as e:
            raise e

    @property
    def regexforfindingcustomurls(self) :
        """Name of the pattern set that contains the regular expressions, which match the URLs in the custom content type other than HTML, CSS, XML, XCOMP, and JavaScript. The custom content type should be included in the patset ns_cvpn_custom_content_types.<br/>Minimum length = 1.
        """
        try :
            return self._regexforfindingcustomurls
        except Exception as e:
            raise e

    @regexforfindingcustomurls.setter
    def regexforfindingcustomurls(self, regexforfindingcustomurls) :
        """Name of the pattern set that contains the regular expressions, which match the URLs in the custom content type other than HTML, CSS, XML, XCOMP, and JavaScript. The custom content type should be included in the patset ns_cvpn_custom_content_types.<br/>Minimum length = 1
        """
        try :
            self._regexforfindingcustomurls = regexforfindingcustomurls
        except Exception as e:
            raise e

    @property
    def clientconsumedcookies(self) :
        """Specify the name of the pattern set containing the names of the cookies, which are allowed between the client and the server. If a pattern set is not specified, NetScaler Gateway does not allow any cookies between the client and the server. A cookie that is not specified in the pattern set is handled by NetScaler Gateway on behalf of the client.<br/>Minimum length = 1.
        """
        try :
            return self._clientconsumedcookies
        except Exception as e:
            raise e

    @clientconsumedcookies.setter
    def clientconsumedcookies(self, clientconsumedcookies) :
        """Specify the name of the pattern set containing the names of the cookies, which are allowed between the client and the server. If a pattern set is not specified, NetScaler Gateway does not allow any cookies between the client and the server. A cookie that is not specified in the pattern set is handled by NetScaler Gateway on behalf of the client.<br/>Minimum length = 1
        """
        try :
            self._clientconsumedcookies = clientconsumedcookies
        except Exception as e:
            raise e

    @property
    def requirepersistentcookie(self) :
        """Specify whether a persistent session cookie is set and accepted for clientless access. If this parameter is set to ON, COM objects, such as MSOffice, which are invoked by the browser can access the files using clientless access. Use caution because the persistent cookie is stored on the disk.<br/>Default value: OFF<br/>Possible values = ON, OFF.
        """
        try :
            return self._requirepersistentcookie
        except Exception as e:
            raise e

    @requirepersistentcookie.setter
    def requirepersistentcookie(self, requirepersistentcookie) :
        """Specify whether a persistent session cookie is set and accepted for clientless access. If this parameter is set to ON, COM objects, such as MSOffice, which are invoked by the browser can access the files using clientless access. Use caution because the persistent cookie is stored on the disk.<br/>Default value: OFF<br/>Possible values = ON, OFF
        """
        try :
            self._requirepersistentcookie = requirepersistentcookie
        except Exception as e:
            raise e

    # --- read-only properties (populated by the server) ----------------
    @property
    def cssrewritepolicylabel(self) :
        """The configured CSS rewrite policylabel.<br/>Minimum length = 1.
        """
        try :
            return self._cssrewritepolicylabel
        except Exception as e:
            raise e

    @property
    def xmlrewritepolicylabel(self) :
        """The configured XML rewrite policylabel.<br/>Minimum length = 1.
        """
        try :
            return self._xmlrewritepolicylabel
        except Exception as e:
            raise e

    @property
    def xcomponentrewritepolicylabel(self) :
        """The configured X-Component rewrite policylabel.<br/>Minimum length = 1.
        """
        try :
            return self._xcomponentrewritepolicylabel
        except Exception as e:
            raise e

    @property
    def isdefault(self) :
        """A value of true is returned if it is a default vpnclientlessrwprofile.
        """
        try :
            return self._isdefault
        except Exception as e:
            raise e

    @property
    def description(self) :
        """Description of the clientless access profile.
        """
        try :
            return self._description
        except Exception as e:
            raise e

    @property
    def builtin(self) :
        """Flag to determine if vpn clientless rewrite profile is built-in or not.<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE, PARTITION_ALL.
        """
        try :
            return self._builtin
        except Exception as e:
            raise e

    # --- NITRO plumbing -------------------------------------------------
    def _get_nitro_response(self, service, response) :
        """ converts nitro response into object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(vpnclientlessaccessprofile_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                # errorcode 444 means the session expired; drop it so the
                # client re-authenticates on the next request.
                if (result.errorcode == 444) :
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.vpnclientlessaccessprofile
        except Exception as e :
            raise e

    def _get_object_name(self) :
        """ Returns the value of object identifier argument
        """
        try :
            if (self.profilename) :
                return str(self.profilename)
            return None
        except Exception as e :
            raise e

    @classmethod
    def add(cls, client, resource) :
        """ Use this API to add vpnclientlessaccessprofile.
        """
        try :
            if type(resource) is not list :
                addresource = vpnclientlessaccessprofile()
                addresource.profilename = resource.profilename
                return addresource.add_resource(client)
            else :
                if (resource and len(resource) > 0) :
                    addresources = [ vpnclientlessaccessprofile() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        addresources[i].profilename = resource[i].profilename
                result = cls.add_bulk_request(client, addresources)
                return result
        except Exception as e :
            raise e

    @classmethod
    def delete(cls, client, resource) :
        """ Use this API to delete vpnclientlessaccessprofile.

        `resource` may be a profile object, a profile name string, or a
        list of either.
        """
        try :
            if type(resource) is not list :
                deleteresource = vpnclientlessaccessprofile()
                # Accept either a bare name string or a resource object.
                if type(resource) != type(deleteresource):
                    deleteresource.profilename = resource
                else :
                    deleteresource.profilename = resource.profilename
                return deleteresource.delete_resource(client)
            else :
                if type(resource[0]) != cls :
                    # List of plain name strings.
                    if (resource and len(resource) > 0) :
                        deleteresources = [ vpnclientlessaccessprofile() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            deleteresources[i].profilename = resource[i]
                else :
                    # List of resource objects.
                    if (resource and len(resource) > 0) :
                        deleteresources = [ vpnclientlessaccessprofile() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            deleteresources[i].profilename = resource[i].profilename
                result = cls.delete_bulk_request(client, deleteresources)
                return result
        except Exception as e :
            raise e

    @classmethod
    def update(cls, client, resource) :
        """ Use this API to update vpnclientlessaccessprofile.
        """
        try :
            if type(resource) is not list :
                updateresource = vpnclientlessaccessprofile()
                updateresource.profilename = resource.profilename
                updateresource.urlrewritepolicylabel = resource.urlrewritepolicylabel
                updateresource.javascriptrewritepolicylabel = resource.javascriptrewritepolicylabel
                updateresource.reqhdrrewritepolicylabel = resource.reqhdrrewritepolicylabel
                updateresource.reshdrrewritepolicylabel = resource.reshdrrewritepolicylabel
                updateresource.regexforfindingurlinjavascript = resource.regexforfindingurlinjavascript
                updateresource.regexforfindingurlincss = resource.regexforfindingurlincss
                updateresource.regexforfindingurlinxcomponent = resource.regexforfindingurlinxcomponent
                updateresource.regexforfindingurlinxml = resource.regexforfindingurlinxml
                updateresource.regexforfindingcustomurls = resource.regexforfindingcustomurls
                updateresource.clientconsumedcookies = resource.clientconsumedcookies
                updateresource.requirepersistentcookie = resource.requirepersistentcookie
                return updateresource.update_resource(client)
            else :
                if (resource and len(resource) > 0) :
                    updateresources = [ vpnclientlessaccessprofile() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        updateresources[i].profilename = resource[i].profilename
                        updateresources[i].urlrewritepolicylabel = resource[i].urlrewritepolicylabel
                        updateresources[i].javascriptrewritepolicylabel = resource[i].javascriptrewritepolicylabel
                        updateresources[i].reqhdrrewritepolicylabel = resource[i].reqhdrrewritepolicylabel
                        updateresources[i].reshdrrewritepolicylabel = resource[i].reshdrrewritepolicylabel
                        updateresources[i].regexforfindingurlinjavascript = resource[i].regexforfindingurlinjavascript
                        updateresources[i].regexforfindingurlincss = resource[i].regexforfindingurlincss
                        updateresources[i].regexforfindingurlinxcomponent = resource[i].regexforfindingurlinxcomponent
                        updateresources[i].regexforfindingurlinxml = resource[i].regexforfindingurlinxml
                        updateresources[i].regexforfindingcustomurls = resource[i].regexforfindingcustomurls
                        updateresources[i].clientconsumedcookies = resource[i].clientconsumedcookies
                        updateresources[i].requirepersistentcookie = resource[i].requirepersistentcookie
                result = cls.update_bulk_request(client, updateresources)
                return result
        except Exception as e :
            raise e

    @classmethod
    def unset(cls, client, resource, args) :
        """ Use this API to unset the properties of vpnclientlessaccessprofile resource.
        Properties that need to be unset are specified in args array.
        """
        try :
            if type(resource) is not list :
                unsetresource = vpnclientlessaccessprofile()
                # Accept either a bare name string or a resource object.
                if type(resource) != type(unsetresource):
                    unsetresource.profilename = resource
                else :
                    unsetresource.profilename = resource.profilename
                return unsetresource.unset_resource(client, args)
            else :
                if type(resource[0]) != cls :
                    # List of plain name strings.
                    if (resource and len(resource) > 0) :
                        unsetresources = [ vpnclientlessaccessprofile() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            unsetresources[i].profilename = resource[i]
                else :
                    # List of resource objects.
                    if (resource and len(resource) > 0) :
                        unsetresources = [ vpnclientlessaccessprofile() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            unsetresources[i].profilename = resource[i].profilename
                result = cls.unset_bulk_request(client, unsetresources, args)
                return result
        except Exception as e :
            raise e

    @classmethod
    def get(cls, client, name="", option_="") :
        """ Use this API to fetch all the vpnclientlessaccessprofile resources that are configured on netscaler.

        With no `name`, every configured profile is returned; `name` may
        also be a single profile name or a list of profile names.
        """
        try :
            if not name :
                obj = vpnclientlessaccessprofile()
                response = obj.get_resources(client, option_)
            else :
                if type(name) != cls :
                    if type(name) is not list :
                        obj = vpnclientlessaccessprofile()
                        obj.profilename = name
                        response = obj.get_resource(client, option_)
                    else :
                        if name and len(name) > 0 :
                            response = [vpnclientlessaccessprofile() for _ in range(len(name))]
                            obj = [vpnclientlessaccessprofile() for _ in range(len(name))]
                            for i in range(len(name)) :
                                obj[i] = vpnclientlessaccessprofile()
                                obj[i].profilename = name[i]
                                response[i] = obj[i].get_resource(client, option_)
            return response
        except Exception as e :
            raise e

    @classmethod
    def get_filtered(cls, client, filter_) :
        """ Use this API to fetch filtered set of vpnclientlessaccessprofile resources.
        filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = vpnclientlessaccessprofile()
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(client, option_)
            return response
        except Exception as e :
            raise e

    @classmethod
    def count(cls, client) :
        """ Use this API to count the vpnclientlessaccessprofile resources configured on NetScaler.
        """
        try :
            obj = vpnclientlessaccessprofile()
            option_ = options()
            option_.count = True
            response = obj.get_resources(client, option_)
            if response :
                # Read '___count' via __dict__: inside __init__ the
                # name is mangled by Python, while the payload
                # formatter sets it verbatim on deserialization.
                return response[0].__dict__['___count']
            return 0
        except Exception as e :
            raise e

    @classmethod
    def count_filtered(cls, client, filter_) :
        """ Use this API to count filtered the set of vpnclientlessaccessprofile resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = vpnclientlessaccessprofile()
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(client, option_)
            if response :
                # See count(): '___count' is set by the payload formatter.
                return response[0].__dict__['___count']
            return 0
        except Exception as e :
            raise e

    class Builtin:
        """Allowed values for the read-only `builtin` flag."""
        MODIFIABLE = "MODIFIABLE"
        DELETABLE = "DELETABLE"
        IMMUTABLE = "IMMUTABLE"
        PARTITION_ALL = "PARTITION_ALL"

    class Requirepersistentcookie:
        """Allowed values for `requirepersistentcookie`."""
        ON = "ON"
        OFF = "OFF"
class vpnclientlessaccessprofile_response(base_response) :
    """NITRO response envelope for vpnclientlessaccessprofile requests.

    Holds the deserialized resource list plus the standard NITRO status
    fields (errorcode, message, severity, sessionid).
    """
    def __init__(self, length=1) :
        # Pre-size the result holder once; the original assigned an
        # empty list here and immediately overwrote it (dead store).
        # Assigning the real list first preserves attribute order.
        self.vpnclientlessaccessprofile = [vpnclientlessaccessprofile() for _ in range(length)]
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._file_shares_operations import build_create_request, build_delete_request, build_get_request, build_lease_request, build_list_request, build_restore_request, build_update_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class FileSharesOperations:
"""FileSharesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2021_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
    """Store the pipeline client, configuration and (de)serializers."""
    self._client = client
    self._config = config
    self._serialize = serializer
    self._deserialize = deserializer
@distributed_trace
def list(
    self,
    resource_group_name: str,
    account_name: str,
    maxpagesize: Optional[str] = None,
    filter: Optional[str] = None,
    expand: Optional[str] = None,
    **kwargs: Any
) -> AsyncIterable["_models.FileShareItems"]:
    """Lists all shares.
    :param resource_group_name: The name of the resource group within the user's subscription. The
     name is case insensitive.
    :type resource_group_name: str
    :param account_name: The name of the storage account within the specified resource group.
     Storage account names must be between 3 and 24 characters in length and use numbers and
     lower-case letters only.
    :type account_name: str
    :param maxpagesize: Optional. Specified maximum number of shares that can be included in the
     list.
    :type maxpagesize: str
    :param filter: Optional. When specified, only share names starting with the filter will be
     listed.
    :type filter: str
    :param expand: Optional, used to expand the properties within share's properties. Valid values
     are: deleted, snapshots. Should be passed as a string with delimiter ','.
    :type expand: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either FileShareItems or the result of cls(response)
    :rtype:
     ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.storage.v2021_04_01.models.FileShareItems]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.FileShareItems"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    def prepare_request(next_link=None):
        # First page uses the operation's templated URL; follow-up pages
        # use the server-provided next_link as the template instead.
        if not next_link:
            request = build_list_request(
                resource_group_name=resource_group_name,
                account_name=account_name,
                subscription_id=self._config.subscription_id,
                maxpagesize=maxpagesize,
                filter=filter,
                expand=expand,
                template_url=self.list.metadata['url'],
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        else:
            request = build_list_request(
                resource_group_name=resource_group_name,
                account_name=account_name,
                subscription_id=self._config.subscription_id,
                maxpagesize=maxpagesize,
                filter=filter,
                expand=expand,
                template_url=next_link,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            # Paging follow-ups are always plain GETs.
            request.method = "GET"
        return request
    async def extract_data(pipeline_response):
        # Deserialize one page and hand back (continuation token, items).
        deserialized = self._deserialize("FileShareItems", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)
    async def get_next(next_link=None):
        # Fetch one page; raise a mapped/ARM-formatted error on non-200.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response
    return AsyncItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares'}  # type: ignore
@distributed_trace_async
async def create(
    self,
    resource_group_name: str,
    account_name: str,
    share_name: str,
    file_share: "_models.FileShare",
    expand: Optional[str] = None,
    **kwargs: Any
) -> "_models.FileShare":
    """Creates a new share under the specified account as described by request body. The share
    resource includes metadata and properties for that share. It does not include a list of the
    files contained by the share.
    :param resource_group_name: The name of the resource group within the user's subscription. The
     name is case insensitive.
    :type resource_group_name: str
    :param account_name: The name of the storage account within the specified resource group.
     Storage account names must be between 3 and 24 characters in length and use numbers and
     lower-case letters only.
    :type account_name: str
    :param share_name: The name of the file share within the specified storage account. File share
     names must be between 3 and 63 characters in length and use numbers, lower-case letters and
     dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
     or number.
    :type share_name: str
    :param file_share: Properties of the file share to create.
    :type file_share: ~azure.mgmt.storage.v2021_04_01.models.FileShare
    :param expand: Optional, used to expand the properties within share's properties. Valid values
     are: snapshots. Should be passed as a string with delimiter ','.
    :type expand: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: FileShare, or the result of cls(response)
    :rtype: ~azure.mgmt.storage.v2021_04_01.models.FileShare
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.FileShare"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    _json = self._serialize.body(file_share, 'FileShare')
    request = build_create_request(
        resource_group_name=resource_group_name,
        account_name=account_name,
        share_name=share_name,
        subscription_id=self._config.subscription_id,
        content_type=content_type,
        json=_json,
        expand=expand,
        template_url=self.create.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
    # Both 200 (existing) and 201 (created) responses carry a FileShare
    # body; the branches are kept separate by the code generator.
    if response.status_code == 200:
        deserialized = self._deserialize('FileShare', pipeline_response)
    if response.status_code == 201:
        deserialized = self._deserialize('FileShare', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}'}  # type: ignore
@distributed_trace_async
async def update(
    self,
    resource_group_name: str,
    account_name: str,
    share_name: str,
    file_share: "_models.FileShare",
    **kwargs: Any
) -> "_models.FileShare":
    """Updates share properties as specified in request body. Properties not mentioned in the request
    will not be changed. Update fails if the specified share does not already exist.
    :param resource_group_name: The name of the resource group within the user's subscription. The
     name is case insensitive.
    :type resource_group_name: str
    :param account_name: The name of the storage account within the specified resource group.
     Storage account names must be between 3 and 24 characters in length and use numbers and
     lower-case letters only.
    :type account_name: str
    :param share_name: The name of the file share within the specified storage account. File share
     names must be between 3 and 63 characters in length and use numbers, lower-case letters and
     dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
     or number.
    :type share_name: str
    :param file_share: Properties to update for the file share.
    :type file_share: ~azure.mgmt.storage.v2021_04_01.models.FileShare
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: FileShare, or the result of cls(response)
    :rtype: ~azure.mgmt.storage.v2021_04_01.models.FileShare
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.FileShare"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    _json = self._serialize.body(file_share, 'FileShare')
    request = build_update_request(
        resource_group_name=resource_group_name,
        account_name=account_name,
        share_name=share_name,
        subscription_id=self._config.subscription_id,
        content_type=content_type,
        json=_json,
        template_url=self.update.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
    deserialized = self._deserialize('FileShare', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}'}  # type: ignore
@distributed_trace_async
async def get(
    self,
    resource_group_name: str,
    account_name: str,
    share_name: str,
    expand: Optional[str] = None,
    x_ms_snapshot: Optional[str] = None,
    **kwargs: Any
) -> "_models.FileShare":
    """Gets properties of a specified share.

    :param resource_group_name: The name of the resource group within the user's subscription. The
     name is case insensitive.
    :type resource_group_name: str
    :param account_name: The name of the storage account within the specified resource group.
     Storage account names must be between 3 and 24 characters in length and use numbers and
     lower-case letters only.
    :type account_name: str
    :param share_name: The name of the file share within the specified storage account. File share
     names must be between 3 and 63 characters in length and use numbers, lower-case letters and
     dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
     or number.
    :type share_name: str
    :param expand: Optional, used to expand the properties within share's properties. Valid values
     are: stats. Should be passed as a string with delimiter ','.
    :type expand: str
    :param x_ms_snapshot: Optional, used to retrieve properties of a snapshot.
    :type x_ms_snapshot: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: FileShare, or the result of cls(response)
    :rtype: ~azure.mgmt.storage.v2021_04_01.models.FileShare
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.FileShare"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    # GET has no body: only path/query parameters are serialized.
    request = build_get_request(
        resource_group_name=resource_group_name,
        account_name=account_name,
        share_name=share_name,
        subscription_id=self._config.subscription_id,
        expand=expand,
        x_ms_snapshot=x_ms_snapshot,
        template_url=self.get.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('FileShare', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}'}  # type: ignore
@distributed_trace_async
async def delete(
    self,
    resource_group_name: str,
    account_name: str,
    share_name: str,
    x_ms_snapshot: Optional[str] = None,
    include: Optional[str] = None,
    **kwargs: Any
) -> None:
    """Deletes specified share under its account.

    :param resource_group_name: The name of the resource group within the user's subscription. The
     name is case insensitive.
    :type resource_group_name: str
    :param account_name: The name of the storage account within the specified resource group.
     Storage account names must be between 3 and 24 characters in length and use numbers and
     lower-case letters only.
    :type account_name: str
    :param share_name: The name of the file share within the specified storage account. File share
     names must be between 3 and 63 characters in length and use numbers, lower-case letters and
     dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
     or number.
    :type share_name: str
    :param x_ms_snapshot: Optional, used to delete a snapshot.
    :type x_ms_snapshot: str
    :param include: Optional. Valid values are: snapshots, leased-snapshots, none. The default
     value is snapshots. For 'snapshots', the file share is deleted including all of its file share
     snapshots. If the file share contains leased-snapshots, the deletion fails. For
     'leased-snapshots', the file share is deleted included all of its file share snapshots
     (leased/unleased). For 'none', the file share is deleted if it has no share snapshots. If the
     file share contains any snapshots (leased or unleased), the deletion fails.
    :type include: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    request = build_delete_request(
        resource_group_name=resource_group_name,
        account_name=account_name,
        share_name=share_name,
        subscription_id=self._config.subscription_id,
        x_ms_snapshot=x_ms_snapshot,
        include=include,
        template_url=self.delete.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Both 200 and 204 indicate a successful delete; there is no body to deserialize.
    if response.status_code not in [200, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}'}  # type: ignore
@distributed_trace_async
async def restore(
    self,
    resource_group_name: str,
    account_name: str,
    share_name: str,
    deleted_share: "_models.DeletedShare",
    **kwargs: Any
) -> None:
    """Restore a file share within a valid retention days if share soft delete is enabled.

    :param resource_group_name: The name of the resource group within the user's subscription. The
     name is case insensitive.
    :type resource_group_name: str
    :param account_name: The name of the storage account within the specified resource group.
     Storage account names must be between 3 and 24 characters in length and use numbers and
     lower-case letters only.
    :type account_name: str
    :param share_name: The name of the file share within the specified storage account. File share
     names must be between 3 and 63 characters in length and use numbers, lower-case letters and
     dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
     or number.
    :type share_name: str
    :param deleted_share: Identifies the soft-deleted share to restore.
    :type deleted_share: ~azure.mgmt.storage.v2021_04_01.models.DeletedShare
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

    _json = self._serialize.body(deleted_share, 'DeletedShare')

    request = build_restore_request(
        resource_group_name=resource_group_name,
        account_name=account_name,
        share_name=share_name,
        subscription_id=self._config.subscription_id,
        content_type=content_type,
        json=_json,
        template_url=self.restore.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # Success returns no payload.
    if cls:
        return cls(pipeline_response, None, {})

restore.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}/restore'}  # type: ignore
@distributed_trace_async
async def lease(
    self,
    resource_group_name: str,
    account_name: str,
    share_name: str,
    x_ms_snapshot: Optional[str] = None,
    parameters: Optional["_models.LeaseShareRequest"] = None,
    **kwargs: Any
) -> "_models.LeaseShareResponse":
    """The Lease Share operation establishes and manages a lock on a share for delete operations.
    The lock duration can be 15 to 60 seconds, or can be infinite.

    :param resource_group_name: The name of the resource group within the user's subscription. The
     name is case insensitive.
    :type resource_group_name: str
    :param account_name: The name of the storage account within the specified resource group.
     Storage account names must be between 3 and 24 characters in length and use numbers and
     lower-case letters only.
    :type account_name: str
    :param share_name: The name of the file share within the specified storage account. File share
     names must be between 3 and 63 characters in length and use numbers, lower-case letters and
     dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
     or number.
    :type share_name: str
    :param x_ms_snapshot: Optional. Specify the snapshot time to lease a snapshot.
    :type x_ms_snapshot: str
    :param parameters: Lease Share request body.
    :type parameters: ~azure.mgmt.storage.v2021_04_01.models.LeaseShareRequest
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: LeaseShareResponse, or the result of cls(response)
    :rtype: ~azure.mgmt.storage.v2021_04_01.models.LeaseShareResponse
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.LeaseShareResponse"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

    # The request body is optional for this operation.
    if parameters is not None:
        _json = self._serialize.body(parameters, 'LeaseShareRequest')
    else:
        _json = None

    request = build_lease_request(
        resource_group_name=resource_group_name,
        account_name=account_name,
        share_name=share_name,
        subscription_id=self._config.subscription_id,
        content_type=content_type,
        json=_json,
        x_ms_snapshot=x_ms_snapshot,
        template_url=self.lease.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # The ETag response header is surfaced to a custom 'cls' hook alongside the body.
    response_headers = {}
    response_headers['ETag'] = self._deserialize('str', response.headers.get('ETag'))

    deserialized = self._deserialize('LeaseShareResponse', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, response_headers)

    return deserialized

lease.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}/lease'}  # type: ignore
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#/******************************************************************************
# * $Id$
# *
# * Project: GDAL
# * Purpose: GDAL API_PROXY server written in Python
# * Author: Even Rouault, <even dot rouault at mines-paris dot org>
# *
# ******************************************************************************
# * Copyright (c) 2013, Even Rouault <even dot rouault at mines-paris dot org>
# *
# * Permission is hereby granted, free of charge, to any person obtaining a
# * copy of this software and associated documentation files (the "Software"),
# * to deal in the Software without restriction, including without limitation
# * the rights to use, copy, modify, merge, publish, distribute, sublicense,
# * and/or sell copies of the Software, and to permit persons to whom the
# * Software is furnished to do so, subject to the following conditions:
# *
# * The above copyright notice and this permission notice shall be included
# * in all copies or substantial portions of the Software.
# *
# * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# * DEALINGS IN THE SOFTWARE.
# ****************************************************************************/
# WARNING: only Python 2 compatible for now
import sys
import os
from struct import *
from osgeo import gdalconst, gdal
class GDALPythonServerRasterBand:
    """Server-side wrapper around a gdal band object.

    Caches the band geometry on construction and delegates every query to the
    underlying band; mask and overview bands are wrapped lazily so that each
    one is created (and registered) at most once.
    """

    def __init__(self, band):
        self.gdal_band = band
        self.XSize = band.XSize
        self.YSize = band.YSize
        self.Band = band.GetBand()
        (self.BlockXSize, self.BlockYSize) = band.GetBlockSize()
        self.DataType = band.DataType
        self.mask_band = None   # created on first GetMaskBand() call
        self.ovr_bands = None   # created on first GetOverview() call

    def FlushCache(self):
        return self.gdal_band.FlushCache()

    def GetColorInterpretation(self):
        return self.gdal_band.GetColorInterpretation()

    def GetNoDataValue(self):
        return self.gdal_band.GetNoDataValue()

    def GetMinimum(self):
        return self.gdal_band.GetMinimum()

    def GetMaximum(self):
        return self.gdal_band.GetMaximum()

    def GetOffset(self):
        return self.gdal_band.GetOffset()

    def GetScale(self):
        return self.gdal_band.GetScale()

    def HasArbitraryOverviews(self):
        return self.gdal_band.HasArbitraryOverviews()

    def GetOverviewCount(self):
        return self.gdal_band.GetOverviewCount()

    def GetMaskFlags(self):
        return self.gdal_band.GetMaskFlags()

    def GetMaskBand(self):
        # Wrap the mask band once and reuse the wrapper afterwards.
        if self.mask_band is None:
            underlying = self.gdal_band.GetMaskBand()
            if underlying is not None:
                self.mask_band = GDALPythonServerRasterBand(underlying)
        return self.mask_band

    def GetOverview(self, iovr):
        # Allocate the cache slots on first use, then wrap each requested
        # overview at most once.
        if self.ovr_bands is None:
            self.ovr_bands = [None] * self.GetOverviewCount()
        if self.ovr_bands[iovr] is None:
            underlying = self.gdal_band.GetOverview(iovr)
            if underlying is not None:
                self.ovr_bands[iovr] = GDALPythonServerRasterBand(underlying)
        return self.ovr_bands[iovr]

    def GetMetadata(self, domain):
        return self.gdal_band.GetMetadata(domain)

    def GetMetadataItem(self, key, domain):
        return self.gdal_band.GetMetadataItem(key, domain)

    def IReadBlock(self, nXBlockOff, nYBlockOff):
        return self.gdal_band.ReadBlock(nXBlockOff, nYBlockOff)

    def IRasterIO_Read(self, nXOff, nYOff, nXSize, nYSize, nBufXSize, nBufYSize, nBufType):
        return self.gdal_band.ReadRaster(nXOff, nYOff, nXSize, nYSize,
                                         buf_xsize=nBufXSize,
                                         buf_ysize=nBufYSize,
                                         buf_type=nBufType)

    def GetUnitType(self):
        return self.gdal_band.GetUnitType()

    def GetStatistics(self, approx_ok, force):
        return self.gdal_band.GetStatistics(approx_ok, force)

    def ComputeRasterMinMax(self, approx_ok):
        return self.gdal_band.ComputeRasterMinMax(approx_ok)

    def GetColorTable(self):
        return self.gdal_band.GetColorTable()

    def GetHistogram(self, dfMin, dfMax, nBuckets, bIncludeOutOfRange, bApproxOK):
        return self.gdal_band.GetHistogram(dfMin, dfMax, nBuckets,
                                           include_out_of_range=bIncludeOutOfRange,
                                           approx_ok=bApproxOK)
class GDALPythonServerDataset:
    """Server-side wrapper around a gdal.Dataset.

    Opens the file eagerly, caches the raster geometry, and pre-wraps every
    band in a GDALPythonServerRasterBand.  Raises if GDAL cannot open the
    file.
    """

    def __init__(self, filename, access=gdal.GA_ReadOnly):
        self.gdal_ds = gdal.Open(filename, access)
        if self.gdal_ds is None:
            # Surface GDAL's own error message to the protocol layer.
            raise Exception(gdal.GetLastErrorMsg())
        self.RasterXSize = self.gdal_ds.RasterXSize
        self.RasterYSize = self.gdal_ds.RasterYSize
        self.RasterCount = self.gdal_ds.RasterCount
        self.bands = [GDALPythonServerRasterBand(self.gdal_ds.GetRasterBand(i + 1))
                      for i in range(self.RasterCount)]

    def __del__(self):
        # Dropping the reference closes the underlying dataset.
        self.gdal_ds = None

    def GetDriver(self):
        return self.gdal_ds.GetDriver()

    def GetRasterBand(self, i):
        # GDAL band numbering is 1-based.
        return self.bands[i - 1]

    def GetDescription(self):
        return self.gdal_ds.GetDescription()

    def GetGeoTransform(self):
        return self.gdal_ds.GetGeoTransform()

    def GetProjectionRef(self):
        return self.gdal_ds.GetProjectionRef()

    def GetGCPCount(self):
        return self.gdal_ds.GetGCPCount()

    def GetFileList(self):
        return self.gdal_ds.GetFileList()

    def GetMetadata(self, domain):
        return self.gdal_ds.GetMetadata(domain)

    def GetMetadataItem(self, key, domain):
        return self.gdal_ds.GetMetadataItem(key, domain)

    def FlushCache(self):
        self.gdal_ds.FlushCache()

    def IRasterIO_Read(self, nXOff, nYOff, nXSize, nYSize, nBufXSize, nBufYSize,
                       nBufType, panBandMap, nPixelSpace, nLineSpace, nBandSpace):
        return self.gdal_ds.ReadRaster(nXOff, nYOff, nXSize, nYSize,
                                       buf_xsize=nBufXSize, buf_ysize=nBufYSize,
                                       buf_type=nBufType, band_list=panBandMap,
                                       buf_pixel_space=nPixelSpace,
                                       buf_line_space=nLineSpace,
                                       buf_band_space=nBandSpace)
# ---------------------------------------------------------------------------
# Instruction opcodes of the API_PROXY wire protocol.  The numeric values
# presumably mirror the C++ GDAL client side of the protocol — confirm against
# the GDAL sources before changing any of them.  Commented-out entries are
# opcodes this Python server does not implement (it is read-only).
# ---------------------------------------------------------------------------
INSTR_GetGDALVersion = 1
INSTR_EXIT = 2
INSTR_EXIT_FAIL = 3
INSTR_SetConfigOption = 4
#INSTR_Progress = 5
INSTR_Reset = 6
INSTR_Open = 7
INSTR_Identify = 8
INSTR_Create = 9
INSTR_CreateCopy = 10
INSTR_QuietDelete = 11
#INSTR_AddBand = 12
INSTR_GetGeoTransform = 13
#INSTR_SetGeoTransform = 14
INSTR_GetProjectionRef = 15
#INSTR_SetProjection = 16
INSTR_GetGCPCount = 17
#INSTR_GetGCPProjection = 18
#INSTR_GetGCPs = 19
#INSTR_SetGCPs = 20
INSTR_GetFileList = 21
INSTR_FlushCache = 22
#INSTR_SetDescription = 23
INSTR_GetMetadata = 24
INSTR_GetMetadataItem = 25
#INSTR_SetMetadata = 26
#INSTR_SetMetadataItem = 27
INSTR_IRasterIO_Read = 28
#INSTR_IRasterIO_Write = 29
#INSTR_IBuildOverviews = 30
#INSTR_AdviseRead = 31
#INSTR_CreateMaskBand = 32
# Opcodes in [INSTR_Band_First, INSTR_Band_End] address a band: they are
# followed on the wire by the server-side band index (see main_loop).
INSTR_Band_First = 33
INSTR_Band_FlushCache = 34
INSTR_Band_GetCategoryNames = 35
#INSTR_Band_SetCategoryNames = 36
#INSTR_Band_SetDescription = 37
INSTR_Band_GetMetadata = 38
INSTR_Band_GetMetadataItem = 39
INSTR_Band_SetMetadata = 40
INSTR_Band_SetMetadataItem = 41
INSTR_Band_GetColorInterpretation = 42
#INSTR_Band_SetColorInterpretation = 43
INSTR_Band_GetNoDataValue = 44
INSTR_Band_GetMinimum = 45
INSTR_Band_GetMaximum = 46
INSTR_Band_GetOffset = 47
INSTR_Band_GetScale = 48
#INSTR_Band_SetNoDataValue = 49
#INSTR_Band_SetOffset = 50
#INSTR_Band_SetScale = 51
INSTR_Band_IReadBlock = 52
#INSTR_Band_IWriteBlock = 53
INSTR_Band_IRasterIO_Read = 54
#INSTR_Band_IRasterIO_Write = 55
INSTR_Band_GetStatistics = 56
#INSTR_Band_ComputeStatistics = 57
#INSTR_Band_SetStatistics = 58
INSTR_Band_ComputeRasterMinMax = 59
INSTR_Band_GetHistogram = 60
INSTR_Band_GetDefaultHistogram = 61
#INSTR_Band_SetDefaultHistogram = 62
INSTR_Band_HasArbitraryOverviews = 63
INSTR_Band_GetOverviewCount = 64
INSTR_Band_GetOverview = 65
INSTR_Band_GetMaskBand = 66
INSTR_Band_GetMaskFlags = 67
#INSTR_Band_CreateMaskBand = 68
#INSTR_Band_Fill = 69
INSTR_Band_GetColorTable = 70
#INSTR_Band_SetColorTable = 71
INSTR_Band_GetUnitType = 72
#INSTR_Band_SetUnitType = 73
#INSTR_Band_BuildOverviews = 74
INSTR_Band_GetDefaultRAT = 75
#INSTR_Band_SetDefaultRAT = 76
#INSTR_Band_AdviseRead = 77
INSTR_Band_End = 78
#INSTR_END = 79

# Capabilities advertised to the client in the INSTR_Open reply: the opcodes
# listed here are folded into a 16-byte bitmask (see the Open branch of
# main_loop).
caps_list = [
    INSTR_GetGDALVersion,
    INSTR_EXIT,
    INSTR_EXIT_FAIL,
    INSTR_SetConfigOption,
    #INSTR_Progress,
    INSTR_Reset,
    INSTR_Open,
    INSTR_Identify,
    INSTR_Create,
    INSTR_CreateCopy,
    INSTR_QuietDelete,
    #INSTR_AddBand,
    INSTR_GetGeoTransform,
    #INSTR_SetGeoTransform,
    INSTR_GetProjectionRef,
    #INSTR_SetProjection,
    INSTR_GetGCPCount,
    #INSTR_GetGCPProjection,
    #INSTR_GetGCPs,
    #INSTR_SetGCPs,
    INSTR_GetFileList,
    INSTR_FlushCache,
    #INSTR_SetDescription,
    INSTR_GetMetadata,
    INSTR_GetMetadataItem,
    #INSTR_SetMetadata,
    #INSTR_SetMetadataItem,
    INSTR_IRasterIO_Read,
    #INSTR_IRasterIO_Write,
    #INSTR_IBuildOverviews,
    #INSTR_AdviseRead,
    #INSTR_CreateMaskBand,
    #INSTR_Band_First,
    INSTR_Band_FlushCache,
    INSTR_Band_GetCategoryNames,
    #INSTR_Band_SetCategoryNames,
    #INSTR_Band_SetDescription,
    INSTR_Band_GetMetadata,
    INSTR_Band_GetMetadataItem,
    INSTR_Band_SetMetadata,
    INSTR_Band_SetMetadataItem,
    INSTR_Band_GetColorInterpretation,
    #INSTR_Band_SetColorInterpretation,
    INSTR_Band_GetNoDataValue,
    INSTR_Band_GetMinimum,
    INSTR_Band_GetMaximum,
    INSTR_Band_GetOffset,
    INSTR_Band_GetScale,
    #INSTR_Band_SetNoDataValue,
    #INSTR_Band_SetOffset,
    #INSTR_Band_SetScale,
    INSTR_Band_IReadBlock,
    #INSTR_Band_IWriteBlock,
    INSTR_Band_IRasterIO_Read,
    #INSTR_Band_IRasterIO_Write,
    INSTR_Band_GetStatistics,
    #INSTR_Band_ComputeStatistics,
    #INSTR_Band_SetStatistics,
    INSTR_Band_ComputeRasterMinMax,
    INSTR_Band_GetHistogram,
    #INSTR_Band_GetDefaultHistogram,
    #INSTR_Band_SetDefaultHistogram,
    INSTR_Band_HasArbitraryOverviews,
    INSTR_Band_GetOverviewCount,
    INSTR_Band_GetOverview,
    INSTR_Band_GetMaskBand,
    INSTR_Band_GetMaskFlags,
    #INSTR_Band_CreateMaskBand,
    #INSTR_Band_Fill,
    INSTR_Band_GetColorTable,
    #INSTR_Band_SetColorTable,
    INSTR_Band_GetUnitType,
    #INSTR_Band_SetUnitType,
    #INSTR_Band_BuildOverviews,
    #INSTR_Band_GetDefaultRAT,
    #INSTR_Band_SetDefaultRAT,
    #INSTR_Band_AdviseRead ,
    #INSTR_Band_End,
    #INSTR_END = 79
]

# Subset of GDAL's CPLErr return codes used in replies.
CE_None = 0
CE_Failure = 3

# Set to 1 to trace every instruction and its arguments on stderr.
VERBOSE = 0
def read_int():
    """Read a packed native-order 32-bit signed integer from stdin."""
    raw = sys.stdin.read(4)
    if sys.version_info >= (3, 0, 0):
        # On Python 3 stdin yields str; recover the raw bytes losslessly.
        raw = raw.encode('latin1')
    return unpack('i', raw)[0]
def read_double():
    """Read a packed native-order 64-bit float from stdin."""
    raw = sys.stdin.read(8)
    if sys.version_info >= (3, 0, 0):
        # On Python 3 stdin yields str; recover the raw bytes losslessly.
        raw = raw.encode('latin1')
    return unpack('d', raw)[0]
def read_str():
    """Read a length-prefixed string from stdin.

    The wire format is an int length followed by that many characters,
    usually including a trailing NUL which is stripped here.  Returns None
    when the length is zero or negative (the encoding of a NULL string).
    """
    length = read_int()
    if length <= 0:
        return None
    # Renamed from 'str' to avoid shadowing the builtin.
    s = sys.stdin.read(length)
    if s.endswith('\0'):
        # Drop the single trailing NUL terminator, if present.
        s = s[:-1]
    return s
def read_strlist():
    """Read an int count followed by that many length-prefixed strings."""
    return [read_str() for _ in range(read_int())]
def write_int(i):
    """Write *i* to stdout as a packed native-order 32-bit int.

    Booleans are normalised to 1/0 and None to 0, so callers can pass
    GDAL boolean results straight through.
    """
    if i is True:
        value = 1
    elif i is False or i is None:
        value = 0
    else:
        value = i
    raw = pack('i', value)
    if sys.version_info >= (3, 0, 0):
        # Python 3 stdout expects str; latin1 round-trips arbitrary bytes.
        raw = raw.decode('latin1')
    sys.stdout.write(raw)
def write_double(d):
    """Write *d* to stdout as a packed native-order 64-bit float."""
    raw = pack('d', d)
    if sys.version_info >= (3, 0, 0):
        # Python 3 stdout expects str; latin1 round-trips arbitrary bytes.
        raw = raw.decode('latin1')
    sys.stdout.write(raw)
def write_str(s):
    """Write a length-prefixed, NUL-terminated string; None encodes as length 0."""
    if s is None:
        write_int(0)
        return
    # Length on the wire includes the trailing NUL.
    write_int(len(s) + 1)
    sys.stdout.write(s)
    sys.stdout.write('\x00')
def write_band(band, isrv_num):
    """Serialize a band descriptor to stdout.

    *isrv_num* is the index the client must use to address this band in
    later INSTR_Band_* requests; None encodes as a single -1.
    """
    if band is None:
        write_int(-1)
        return
    write_int(isrv_num)         # server-side band index
    write_int(band.Band)        # band number within the dataset
    write_int(0)                # access
    write_int(band.XSize)       # X size
    write_int(band.YSize)       # Y size
    write_int(band.DataType)    # data type
    write_int(band.BlockXSize)  # block X size
    write_int(band.BlockYSize)  # block Y size
    write_str('')               # band description
def write_ct(ct):
    """Serialize a color table to stdout; None encodes as a single -1."""
    if ct is None:
        write_int(-1)
        return
    write_int(ct.GetPaletteInterpretation())
    count = ct.GetCount()
    write_int(count)
    for i in range(count):
        entry = ct.GetColorEntry(i)
        # Four channels per entry (e.g. RGBA for an RGB palette).
        for channel in (entry[0], entry[1], entry[2], entry[3]):
            write_int(channel)
def write_marker():
    """Emit the 4-byte 0xDEADBEEF sentinel that starts every reply."""
    sys.stdout.write('\xDE\xAD\xBE\xEF')
def write_zero_error():
    # Trailer appended after each handled instruction in main_loop:
    # presumably a pending-error count of zero (confirm against the C++
    # client's expectations).
    write_int(0)
def main_loop():
    """Serve the GDAL API_PROXY protocol over stdin/stdout until EXIT/Reset.

    Reads one instruction opcode at a time, dispatches it, and writes the
    reply (marker + payload).  Instructions that fall through the dispatch
    chain additionally get a zero-error trailer; GetGDALVersion and
    SetConfigOption 'continue' and therefore skip it.  State: the currently
    open dataset and the list of bands handed out to the client (the client
    addresses bands by their index in server_bands).
    """
    server_ds = None
    server_bands = []
    # Make sure the server itself never recurses through the proxy driver.
    gdal.SetConfigOption('GDAL_API_PROXY', 'NO')
    while 1:
        sys.stdout.flush()
        instr = read_int()
        if VERBOSE:
            sys.stderr.write('instr=%d\n' % instr)
        band = None
        # Band-scoped instructions carry the server band index right after
        # the opcode.
        if instr >= INSTR_Band_First and instr <= INSTR_Band_End:
            srv_band = read_int()
            band = server_bands[srv_band]
        if instr == INSTR_GetGDALVersion:
            # Handshake: read the client's version info, reply with ours.
            if sys.version_info >= (3,0,0):
                lsb = unpack('B', sys.stdin.read(1).encode('latin1'))[0]
            else:
                lsb = unpack('B', sys.stdin.read(1))[0]
            ver = read_str()
            vmajor = read_int()
            vminor = read_int()
            protovmajor = read_int()
            protovminor = read_int()
            extra_bytes = read_int()
            if VERBOSE:
                sys.stderr.write('lsb=%d\n' % lsb)
                sys.stderr.write('ver=%s\n' % ver)
                sys.stderr.write('vmajor=%d\n' % vmajor)
                sys.stderr.write('vminor=%d\n' % vminor)
                sys.stderr.write('protovmajor=%d\n' % protovmajor)
                sys.stderr.write('protovminor=%d\n' % protovminor)
                sys.stderr.write('extra_bytes=%d\n' % extra_bytes)
            write_str('1.10')
            write_int(1) # vmajor
            write_int(10) # vminor
            write_int(1) # protovmajor
            write_int(0) # protovminor
            write_int(0) # extra bytes
            continue
        elif instr == INSTR_EXIT:
            server_ds = None
            server_bands = []
            write_marker()
            write_int(1)
            sys.exit(0)
        elif instr == INSTR_EXIT_FAIL:
            server_ds = None
            server_bands = []
            write_marker()
            write_int(1)
            sys.exit(1)
        elif instr == INSTR_SetConfigOption:
            key = read_str()
            val = read_str()
            gdal.SetConfigOption(key, val)
            if VERBOSE:
                sys.stderr.write('key=%s\n' % key)
                sys.stderr.write('val=%s\n' % val)
            continue
        elif instr == INSTR_Reset:
            # Drop the current dataset/bands but keep serving.
            #if server_ds is not None:
            #    sys.stderr.write('Reset(%s)\n' % server_ds.GetDescription())
            server_ds = None
            server_bands = []
            write_marker()
            write_int(1)
        elif instr == INSTR_Open:
            access = read_int()
            filename = read_str()
            cwd = read_str()
            if cwd is not None:
                os.chdir(cwd)
            if VERBOSE:
                sys.stderr.write('access=%d\n' % access)
                sys.stderr.write('filename=%s\n' % filename)
                sys.stderr.write('cwd=%s\n' % cwd)
            #sys.stderr.write('Open(%s)\n' % filename)
            try:
                server_ds = GDALPythonServerDataset(filename, access)
            except:
                # Any failure is reported to the client as a plain 0.
                server_ds = None
            write_marker()
            if server_ds is None:
                write_int(0) # Failure
            else:
                write_int(1) # Success
                # Advertise the supported instruction set as a 16-byte bitmask.
                write_int(16) # caps length
                caps = [ 0 for i in range(16)]
                for cap in caps_list:
                    caps[int(cap / 8)] = caps[int(cap / 8)] | (1 << (cap % 8))
                for i in range(16):
                    sys.stdout.write(pack('B', caps[i])) # caps
                write_str(server_ds.GetDescription())
                drv = server_ds.GetDriver()
                if drv is not None:
                    write_str(drv.GetDescription())
                    write_int(0) # End of driver metadata
                else:
                    write_str(None)
                write_int(server_ds.RasterXSize) # X
                write_int(server_ds.RasterYSize) # Y
                write_int(server_ds.RasterCount) # Band count
                write_int(1) # All bands are identical
                if server_ds.RasterCount > 0:
                    # Only the first band descriptor is sent ("all identical");
                    # every band is still registered for later addressing.
                    write_band(server_ds.GetRasterBand(1), len(server_bands))
                    for i in range(server_ds.RasterCount):
                        server_bands.append(server_ds.GetRasterBand(i + 1))
        elif instr == INSTR_Identify:
            filename = read_str()
            cwd = read_str()  # read to keep the stream in sync; unused here
            dr = gdal.IdentifyDriver(filename)
            write_marker()
            if dr is None:
                write_int(0)
            else:
                write_int(1)
        elif instr == INSTR_Create:
            filename = read_str()
            cwd = read_str()
            xsize = read_int()
            ysize = read_int()
            bands = read_int()
            datatype = read_int()
            options = read_strlist()
            write_marker()
            # FIXME
            write_int(0)
        elif instr == INSTR_CreateCopy:
            filename = read_str()
            src_description = read_str()
            cwd = read_str()
            strict = read_int()
            options = read_strlist()
            # NOTE(review): unlike INSTR_Create, no write_marker() is emitted
            # here before the status — verify against the client protocol.
            # FIXME
            write_int(0)
        elif instr == INSTR_QuietDelete:
            filename = read_str()
            cwd = read_str()
            write_marker()
            # FIXME
        elif instr == INSTR_GetGeoTransform:
            gt = server_ds.GetGeoTransform()
            write_marker()
            if gt is not None:
                write_int(CE_None)
                write_int(6 * 8)  # payload size: six doubles
                for i in range(6):
                    write_double(gt[i])
            else:
                # Failure still carries a default identity-like transform.
                write_int(CE_Failure)
                write_int(6 * 8)
                write_double(0)
                write_double(1)
                write_double(0)
                write_double(0)
                write_double(0)
                write_double(1)
        elif instr == INSTR_GetProjectionRef:
            write_marker()
            write_str(server_ds.GetProjectionRef())
        elif instr == INSTR_GetGCPCount:
            write_marker()
            write_int(server_ds.GetGCPCount())
        elif instr == INSTR_GetFileList:
            write_marker()
            fl = server_ds.GetFileList()
            write_int(len(fl))
            for i in range(len(fl)):
                write_str(fl[i])
        elif instr == INSTR_GetMetadata:
            domain = read_str()
            md = server_ds.GetMetadata(domain)
            write_marker()
            write_int(len(md))
            for key in md:
                write_str('%s=%s' % (key, md[key]))
        elif instr == INSTR_GetMetadataItem:
            key = read_str()
            domain = read_str()
            val = server_ds.GetMetadataItem(key, domain)
            write_marker()
            write_str(val)
        elif instr == INSTR_IRasterIO_Read:
            nXOff = read_int()
            nYOff = read_int()
            nXSize = read_int()
            nYSize = read_int()
            nBufXSize = read_int()
            nBufYSize = read_int()
            nBufType = read_int()
            nBandCount = read_int()
            panBandMap = []
            size = read_int()  # band-map byte size; read to stay in sync
            for i in range(nBandCount):
                panBandMap.append(read_int())
            nPixelSpace = read_int()
            nLineSpace = read_int()
            nBandSpace = read_int()
            val = server_ds.IRasterIO_Read(nXOff, nYOff, nXSize, nYSize, nBufXSize, nBufYSize, nBufType, panBandMap, nPixelSpace, nLineSpace, nBandSpace)
            write_marker()
            if val is None:
                write_int(CE_Failure)
                write_int(0)
            else:
                write_int(CE_None)
                write_int(len(val))
                sys.stdout.write(val)
        elif instr == INSTR_FlushCache:
            if server_ds is not None:
                server_ds.FlushCache()
            write_marker()
        elif instr == INSTR_Band_FlushCache:
            val = band.FlushCache()
            write_marker()
            write_int(val)
        elif instr == INSTR_Band_GetCategoryNames:
            write_marker()
            # FIXME
            write_int(-1)
        elif instr == INSTR_Band_GetMetadata:
            domain = read_str()
            md = band.GetMetadata(domain)
            write_marker()
            write_int(len(md))
            for key in md:
                write_str('%s=%s' % (key, md[key]))
        elif instr == INSTR_Band_GetMetadataItem:
            key = read_str()
            domain = read_str()
            val = band.GetMetadataItem(key, domain)
            write_marker()
            write_str(val)
        elif instr == INSTR_Band_GetColorInterpretation:
            val = band.GetColorInterpretation()
            write_marker()
            write_int(val)
        elif instr == INSTR_Band_GetNoDataValue:
            # Optional double replies: presence flag, then the value.
            val = band.GetNoDataValue()
            write_marker()
            if val is None:
                write_int(0)
                write_double(0)
            else:
                write_int(1)
                write_double(val)
        elif instr == INSTR_Band_GetMinimum:
            val = band.GetMinimum()
            write_marker()
            if val is None:
                write_int(0)
                write_double(0)
            else:
                write_int(1)
                write_double(val)
        elif instr == INSTR_Band_GetMaximum:
            val = band.GetMaximum()
            write_marker()
            if val is None:
                write_int(0)
                write_double(0)
            else:
                write_int(1)
                write_double(val)
        elif instr == INSTR_Band_GetOffset:
            val = band.GetOffset()
            write_marker()
            if val is None:
                write_int(0)
                write_double(0)
            else:
                write_int(1)
                write_double(val)
        elif instr == INSTR_Band_GetScale:
            val = band.GetScale()
            write_marker()
            if val is None:
                write_int(0)
                write_double(1) #default value is 1
            else:
                write_int(1)
                write_double(val)
        elif instr == INSTR_Band_IReadBlock:
            nXBlockOff = read_int()
            nYBlockOff = read_int()
            val = band.IReadBlock(nXBlockOff, nYBlockOff)
            write_marker()
            if val is None:
                # On failure, still send a zero-filled block of the expected size.
                # NOTE(review): '/' makes l a float on Python 3 (range(l) would
                # raise) — consistent with the "Python 2 only" warning above.
                write_int(CE_Failure)
                l = band.BlockXSize * band.BlockYSize * (gdal.GetDataTypeSize(band.DataType) / 8)
                write_int(l)
                sys.stdout.write(''.join('\0' for i in range(l)))
            else:
                write_int(CE_None)
                write_int(len(val))
                sys.stdout.write(val)
        elif instr == INSTR_Band_IRasterIO_Read:
            nXOff = read_int()
            nYOff = read_int()
            nXSize = read_int()
            nYSize = read_int()
            nBufXSize = read_int()
            nBufYSize = read_int()
            nBufType = read_int()
            val = band.IRasterIO_Read(nXOff, nYOff, nXSize, nYSize, nBufXSize, nBufYSize, nBufType)
            write_marker()
            if val is None:
                write_int(CE_Failure)
                write_int(0)
            else:
                write_int(CE_None)
                write_int(len(val))
                sys.stdout.write(val)
        elif instr == INSTR_Band_GetStatistics:
            approx_ok = read_int()
            force = read_int()
            val = band.GetStatistics(approx_ok, force)
            write_marker()
            # val[3] < 0 presumably signals "statistics not available" — confirm
            # against the GDAL GetStatistics contract.
            if val is None or val[3] < 0:
                write_int(CE_Failure)
            else:
                write_int(CE_None)
                write_double(val[0])
                write_double(val[1])
                write_double(val[2])
                write_double(val[3])
        elif instr == INSTR_Band_ComputeRasterMinMax:
            approx_ok = read_int()
            val = band.ComputeRasterMinMax(approx_ok)
            write_marker()
            if val is None:
                write_int(CE_Failure)
            else:
                write_int(CE_None)
                write_double(val[0])
                write_double(val[1])
        elif instr == INSTR_Band_GetHistogram:
            dfMin = read_double()
            dfMax = read_double()
            nBuckets = read_int()
            bIncludeOutOfRange = read_int()
            bApproxOK = read_int()
            val = band.GetHistogram(dfMin, dfMax, nBuckets, bIncludeOutOfRange, bApproxOK)
            write_marker()
            if val is None:
                write_int(CE_Failure)
            else:
                write_int(CE_None)
                write_int(len(val) * 4)  # payload size: one int per bucket
                for i in range(len(val)):
                    write_int(val[i])
        #elif instr == INSTR_Band_GetDefaultHistogram:
        #    bForce = read_int()
        #    write_marker()
        #    write_int(CE_Failure)
        elif instr == INSTR_Band_HasArbitraryOverviews:
            val = band.HasArbitraryOverviews()
            write_marker()
            write_int(val)
        elif instr == INSTR_Band_GetOverviewCount:
            val = band.GetOverviewCount()
            write_marker()
            write_int(val)
        elif instr == INSTR_Band_GetOverview:
            iovr = read_int()
            ovr_band = band.GetOverview(iovr)
            write_marker()
            write_band(ovr_band, len(server_bands))
            if ovr_band is not None:
                # Register so the client can address it by index later.
                server_bands.append(ovr_band)
        elif instr == INSTR_Band_GetMaskBand:
            msk_band = band.GetMaskBand()
            write_marker()
            write_band(msk_band, len(server_bands))
            if msk_band is not None:
                server_bands.append(msk_band)
        elif instr == INSTR_Band_GetMaskFlags:
            val = band.GetMaskFlags()
            write_marker()
            write_int(val)
        elif instr == INSTR_Band_GetColorTable:
            ct = band.GetColorTable()
            write_marker()
            write_ct(ct)
        elif instr == INSTR_Band_GetUnitType:
            val = band.GetUnitType()
            write_marker()
            write_str(val)
        #elif instr == INSTR_Band_GetDefaultRAT:
        #    write_marker()
        #    # FIXME
        #    write_int(0)
        else:
            # Unknown instruction: terminate the loop (and the process).
            break
        # Per-instruction trailer (skipped by branches that 'continue' above).
        write_zero_error()

main_loop()
| |
"""
URI and Identity message tests.
"""
from enum import Enum, auto
from json import dumps, loads
from time import time
from unittest.mock import patch
from hamcrest import (
assert_that,
equal_to,
instance_of,
is_,
)
from marshmallow.fields import Field
from microcosm.api import create_object_graph
from microcosm_pubsub.codecs import PubSubMessageCodec
from microcosm_pubsub.conventions import (
IdentityMessageSchema,
URIMessageSchema,
created,
deleted,
)
from microcosm_pubsub.decorators import schema
from microcosm_pubsub.tests.fixtures import ExampleDaemon, noop_handler
class Foo:
    """Placeholder resource class; used only to derive media-type names
    in the dispatch-convention tests below (e.g. ``created(Foo)``)."""
    pass
class TestEnum(Enum):
    """Sample enum used to exercise EnumField (de)serialization."""
    key = auto()
    def __str__(self):
        # Render as the bare member name ("key"), not "TestEnum.key".
        return self.name
class EnumField(Field):
    """
    Marshmallow field that (de)serializes enum members by name.

    Mostly borrowed from microcosm-flask's EnumField; present here to
    test serialization of otherwise non-serializable values.
    """
    default_error_messages = {
        "by_name": "Invalid enum member {name}",
    }
    def __init__(self, enum, **kwargs):
        super().__init__(**kwargs)
        self.enum = enum
    def _serialize(self, value, attr, obj, **kwargs):
        # None passes through; plain strings are assumed already serialized.
        if value is None:
            return value
        if isinstance(value, str) and not isinstance(value, Enum):
            return value
        return value.name
    def _deserialize(self, value, attr, data, **kwargs):
        # None passes through; anything else is looked up by member name.
        if value is None:
            return value
        return self._deserialize_by_name(value)
    def _deserialize_by_name(self, value):
        try:
            return getattr(self.enum, value)
        except AttributeError:
            # Delegate to marshmallow's error machinery.
            self.fail("by_name", name=value)
@schema
class CustomMessageSchema(URIMessageSchema):
    """
    URI message schema extended with a custom (enum) field.

    Registered via ``@schema`` under the created-"Resource" media type.
    """
    MEDIA_TYPE = created("Resource")
    enumField = EnumField(TestEnum, attribute="enum_field", required=True)
def test_encode_uri_message_schema():
    """
    Encoding should emit the standard URIMessage fields in camelCase.
    """
    uri_schema = URIMessageSchema()
    codec = PubSubMessageCodec(uri_schema)
    encoded = codec.encode(
        opaque_data=dict(foo="bar"),
        uri="http://example.com",
        media_type="application/vnd.globality.pubsub._.deleted.foo",
    )
    expected = {
        "mediaType": "application/vnd.globality.pubsub._.deleted.foo",
        "opaqueData": {
            "foo": "bar",
        },
        "uri": "http://example.com",
    }
    assert_that(loads(encoded), is_(equal_to(expected)))
def test_encode_custom_message():
    """
    Encoding a custom schema should emit the standard URIMessage fields
    plus any additionally declared fields (here, the enum field).
    """
    codec = PubSubMessageCodec(CustomMessageSchema())
    encoded = codec.encode(
        enum_field=TestEnum.key,
        media_type=CustomMessageSchema.MEDIA_TYPE,
        opaque_data=dict(foo="bar"),
        uri="http://example.com",
    )
    expected = {
        "mediaType": "application/vnd.globality.pubsub._.created.resource",
        "opaqueData": {
            "foo": "bar",
        },
        "uri": "http://example.com",
        "enumField": "key",
    }
    assert_that(loads(encoded), is_(equal_to(expected)))
def test_encode_identity_message_schema():
    """
    Encoding should emit the standard IdentityMessage fields in camelCase.
    """
    identity_schema = IdentityMessageSchema()
    codec = PubSubMessageCodec(identity_schema)
    encoded = codec.encode(
        opaque_data=dict(foo="bar"),
        media_type="application/vnd.globality.pubsub._.deleted.foo",
        id="1",
    )
    expected = {
        "mediaType": "application/vnd.globality.pubsub._.deleted.foo",
        "opaqueData": {
            "foo": "bar",
        },
        "id": "1",
    }
    assert_that(loads(encoded), is_(equal_to(expected)))
def test_decode_uri_message_schema():
    """
    Decoding should map the standard URIMessage fields to snake_case keys.
    """
    codec = PubSubMessageCodec(URIMessageSchema())
    raw = dumps({
        "mediaType": "application/vnd.globality.pubsub.foo",
        "opaqueData": {
            "foo": "bar",
        },
        "uri": "http://example.com",
    })
    decoded = codec.decode(raw)
    assert_that(decoded, is_(equal_to({
        "media_type": "application/vnd.globality.pubsub.foo",
        "opaque_data": dict(foo="bar"),
        "uri": "http://example.com",
    })))
def test_decode_identity_message_schema():
    """
    Decoding should map the standard IdentityMessage fields to snake_case keys.
    """
    codec = PubSubMessageCodec(IdentityMessageSchema())
    raw = dumps({
        "mediaType": "application/vnd.globality.pubsub._.created.foo",
        "opaqueData": {
            "foo": "bar",
        },
        "id": "1",
    })
    expected = {
        "media_type": "application/vnd.globality.pubsub._.created.foo",
        "opaque_data": dict(foo="bar"),
        "id": "1",
    }
    assert_that(codec.decode(raw), is_(equal_to(expected)))
def test_publish_by_uri_convention():
    """
    Message publishing can use this convention.
    """
    def loader(metadata):
        return dict(sns_topic_arns=dict(default="default"))

    graph = create_object_graph("example", testing=True, loader=loader)
    published_time = str(time())
    # Freeze the producer's clock so the published timestamp is predictable.
    with patch("microcosm_pubsub.producer.time") as mocked_time:
        mocked_time.return_value = published_time
        graph.sns_producer.produce(created("foo"), uri="http://example.com", opaque_data=dict())
    publish_kwargs = graph.sns_producer.sns_client.publish.call_args[1]
    assert_that(graph.sns_producer.sns_client.publish.call_count, is_(equal_to(1)))
    assert_that(publish_kwargs["TopicArn"], is_(equal_to("default")))
    assert_that(loads(publish_kwargs["Message"]), is_(equal_to({
        "mediaType": "application/vnd.globality.pubsub._.created.foo",
        "uri": "http://example.com",
        "opaqueData": {
            "X-Request-Published": published_time,
        },
    })))
    assert_that(publish_kwargs["MessageAttributes"], is_(equal_to({
        "media_type": {
            "DataType": "String",
            "StringValue": "application/vnd.globality.pubsub._.created.foo"
        },
    })))
def test_publish_by_identity_convention():
    """
    Message publishing can use this convention.
    """
    def loader(metadata):
        return dict(
            sns_topic_arns=dict(
                default="default",
            )
        )
    graph = create_object_graph("example", testing=True, loader=loader)
    # Stringify upfront for consistency with test_publish_by_uri_convention;
    # the producer stringifies the timestamp either way, so the mocked clock
    # and the expected payload value stay trivially in sync.
    published_time = str(time())
    with patch("microcosm_pubsub.producer.time") as mocked_time:
        mocked_time.return_value = published_time
        graph.sns_producer.produce(deleted("foo"), id="1", opaque_data=dict())
    assert_that(graph.sns_producer.sns_client.publish.call_count, is_(equal_to(1)))
    assert_that(graph.sns_producer.sns_client.publish.call_args[1]["TopicArn"], is_(equal_to("default")))
    assert_that(loads(graph.sns_producer.sns_client.publish.call_args[1]["Message"]), is_(equal_to({
        "mediaType": "application/vnd.globality.pubsub._.deleted.foo",
        "id": "1",
        "opaqueData": {
            "X-Request-Published": published_time,
        },
    })))
    assert_that(graph.sns_producer.sns_client.publish.call_args[1]["MessageAttributes"], is_(equal_to({
        "media_type": {
            "DataType": "String",
            "StringValue": "application/vnd.globality.pubsub._.deleted.foo"
        },
    })))
def test_dispatch_by_uri_convention():
    """
    Message dispatch can use this convention.
    """
    daemon = ExampleDaemon.create_for_testing()
    media_type = created(Foo)
    found_schema = daemon.graph.pubsub_message_schema_registry.find(media_type).schema
    found_handler = daemon.graph.sqs_message_handler_registry.find(
        media_type, daemon.bound_handlers,
    )
    assert_that(found_schema, is_(instance_of(URIMessageSchema)))
    assert_that(found_handler, is_(equal_to(noop_handler)))
def test_dispatch_by_identity_convention():
    """
    Message dispatch can use this convention.
    """
    daemon = ExampleDaemon.create_for_testing()
    media_type = deleted(Foo)
    found_schema = daemon.graph.pubsub_message_schema_registry.find(media_type).schema
    found_handler = daemon.graph.sqs_message_handler_registry.find(
        media_type, daemon.bound_handlers,
    )
    assert_that(found_schema, is_(instance_of(IdentityMessageSchema)))
    assert_that(found_handler, is_(equal_to(noop_handler)))
| |
"""
ansi.py
Experimental ANSI formatting engine.
"""
import re
class Formatter(object):
    """
    Experimental ANSI formatter.

    Translates text containing ANSI SGR escape sequences (such as
    "\\x1b[1m" for bold or "\\x1b[31m" for a red foreground) into
    Pango-style markup (<b>, <i>, <s>, <u> and <span> tags).
    """
    # Default mapping from ANSI color names to output color names.
    # Callers may pass their own mapping to process_formatting(); names
    # missing from the mapping are passed through unchanged.
    default_coloring = {
        "red": "red",
        "blue": "blue",
        "green": "green",
        "cyan": "cyan",
        "magenta": "magenta",
        "white": "white",
    }
    # NOTE: the handlers below deliberately take no `self` -- they are stored
    # as plain functions in the `ansi_handlers` dict and called directly with
    # the mutable format-state dict.
    def _handle_reset(format_state):
        """Handler for ANSI code 0: reset every attribute to its default."""
        format_state["background"] = None
        format_state["foreground"] = None
        format_state["strikethrough"] = False
        format_state["underline"] = False
        format_state["italics"] = False
        format_state["bold"] = False
        # SGR 0 resets *all* rendition attributes, including inverse video.
        format_state["inverse"] = False
    def _handle_strikethrough_on(format_state):
        """Handler for ANSI code 9."""
        format_state["strikethrough"] = True
    def _handle_strikethrough_off(format_state):
        """Handler for ANSI code 29."""
        format_state["strikethrough"] = False
    def _handle_inverse_off(format_state):
        """Handler for ANSI code 27."""
        format_state["inverse"] = False
    def _handle_inverse_on(format_state):
        """Handler for ANSI code 7."""
        format_state["inverse"] = True
    def _handle_bold_on(format_state):
        """Handler for ANSI code 1."""
        format_state["bold"] = True
    def _handle_italics_on(format_state):
        """Handler for ANSI code 3."""
        format_state["italics"] = True
    def _handle_underline_on(format_state):
        """Handler for ANSI code 4."""
        format_state["underline"] = True
    def _handle_bold_off(format_state):
        """Handler for ANSI code 22."""
        format_state["bold"] = False
    def _handle_italics_off(format_state):
        """Handler for ANSI code 23."""
        format_state["italics"] = False
    def _handle_underline_off(format_state):
        """Handler for ANSI code 24."""
        format_state["underline"] = False
    def _handle_foreground_black(format_state):
        """Handler for ANSI code 30."""
        format_state["foreground"] = "black"
    def _handle_foreground_red(format_state):
        """Handler for ANSI code 31."""
        format_state["foreground"] = "red"
    def _handle_foreground_green(format_state):
        """Handler for ANSI code 32."""
        format_state["foreground"] = "green"
    def _handle_foreground_yellow(format_state):
        """Handler for ANSI code 33."""
        format_state["foreground"] = "yellow"
    def _handle_foreground_blue(format_state):
        """Handler for ANSI code 34."""
        format_state["foreground"] = "blue"
    def _handle_foreground_magenta(format_state):
        """Handler for ANSI code 35."""
        format_state["foreground"] = "magenta"
    def _handle_foreground_cyan(format_state):
        """Handler for ANSI code 36."""
        format_state["foreground"] = "cyan"
    def _handle_foreground_white(format_state):
        """Handler for ANSI code 37."""
        format_state["foreground"] = "white"
    def _handle_foreground_reset(format_state):
        """Handler for ANSI code 39."""
        format_state["foreground"] = None
    def _handle_background_black(format_state):
        """Handler for ANSI code 40."""
        format_state["background"] = "black"
    def _handle_background_red(format_state):
        """Handler for ANSI code 41."""
        format_state["background"] = "red"
    def _handle_background_green(format_state):
        """Handler for ANSI code 42."""
        format_state["background"] = "green"
    def _handle_background_yellow(format_state):
        """Handler for ANSI code 43."""
        format_state["background"] = "yellow"
    def _handle_background_blue(format_state):
        """Handler for ANSI code 44."""
        format_state["background"] = "blue"
    def _handle_background_magenta(format_state):
        """Handler for ANSI code 45."""
        format_state["background"] = "magenta"
    def _handle_background_cyan(format_state):
        """Handler for ANSI code 46."""
        format_state["background"] = "cyan"
    def _handle_background_white(format_state):
        """Handler for ANSI code 47."""
        format_state["background"] = "white"
    def _handle_background_reset(format_state):
        """Handler for ANSI code 49."""
        format_state["background"] = None
    # Dispatch table: SGR code -> handler mutating the format state.
    ansi_handlers = {
        0: _handle_reset,
        1: _handle_bold_on,
        3: _handle_italics_on,
        4: _handle_underline_on,
        7: _handle_inverse_on,
        9: _handle_strikethrough_on,
        22: _handle_bold_off,
        23: _handle_italics_off,
        24: _handle_underline_off,
        27: _handle_inverse_off,
        29: _handle_strikethrough_off,
        30: _handle_foreground_black,
        31: _handle_foreground_red,
        32: _handle_foreground_green,
        33: _handle_foreground_yellow,
        34: _handle_foreground_blue,
        35: _handle_foreground_magenta,
        36: _handle_foreground_cyan,
        37: _handle_foreground_white,
        39: _handle_foreground_reset,
        40: _handle_background_black,
        41: _handle_background_red,
        42: _handle_background_green,
        43: _handle_background_yellow,
        44: _handle_background_blue,
        45: _handle_background_magenta,
        46: _handle_background_cyan,
        47: _handle_background_white,
        49: _handle_background_reset,
    }
    def process_formatting(self, input_text, coloring=None):
        """
        Convert ANSI-formatted ``input_text`` into Pango-style markup.

        :param input_text: text possibly containing ANSI escape sequences.
        :param coloring: optional dict remapping color names (for example
            ``{"red": "#ff8080"}``); defaults to :attr:`default_coloring`.
            Colors absent from the mapping are used verbatim.
        :return: the resulting markup string.
        """
        # Formatting state tracked per segment of text.
        current_format = {
            "background": None,       # current background color name
            "foreground": None,       # current foreground color name
            "strikethrough": False,
            "underline": False,
            "italics": False,
            "bold": False,
            "inverse": False,         # swap fg/bg when rendering
        }
        # In-order (raw_text, format_state) tuples.
        segments = []
        current_index = 0
        if coloring is None:
            coloring = self.default_coloring
        # Matches sequences of the form ESC[<n>m or ESC[<n>;<m>m.
        ansi_regex = re.compile("\x1b\\[([0-9]+\\;)?[0-9]+m")
        beginning_regex = re.compile("\x1b\\[([0-9]+\\;)?")
        for match in re.finditer(ansi_regex, input_text):
            start_index = match.start()
            end_index = match.end()
            # Keep only the (last) numeric code of the sequence.
            # FIXME: the optional "<n>;" prefix is currently discarded.
            ansi_numeric = match.group(0).rstrip("m")
            ansi_numeric = int(re.sub(beginning_regex, "", ansi_numeric))
            if ansi_numeric in self.ansi_handlers:
                # Stow away the text governed by the CURRENT state before
                # mutating it for the following text.
                old_text = input_text[current_index:start_index]
                current_index = end_index
                segments.append((old_text, dict(current_format)))
                handler = self.ansi_handlers[ansi_numeric]
                if handler is not None:
                    # The handler mutates current_format in place.
                    handler(current_format)
                else:
                    print("*** ANSI warning: Found known ANSI format code %u, but it is not implemented." % ansi_numeric)
            else:
                print("*** ANSI warning: Found unknown ANSI format code %u." % ansi_numeric)
        # Any trailing text keeps the final format state.
        segments.append((input_text[current_index:], current_format))
        # Render each segment as Pango markup.
        result = ""
        for text_buffer, format_data in segments:
            # Skip empty segments so consecutive escape codes do not emit
            # empty tag pairs like <span ...></span>.
            if not text_buffer:
                continue
            if format_data["bold"]:
                text_buffer = "<b>%s</b>" % text_buffer
            if format_data["italics"]:
                text_buffer = "<i>%s</i>" % text_buffer
            if format_data["strikethrough"]:
                text_buffer = "<s>%s</s>" % text_buffer
            if format_data["underline"]:
                text_buffer = "<u>%s</u>" % text_buffer
            foreground_color = format_data["foreground"]
            background_color = format_data["background"]
            # Swap our colors if necessary.
            if format_data["inverse"]:
                foreground_color, background_color = (background_color,
                                                      foreground_color)
            # Wrap in a <span> when colors are active, translating names
            # through the coloring table (identity for unmapped names).
            span_attrs = []
            if foreground_color is not None:
                mapped = coloring.get(foreground_color, foreground_color)
                span_attrs.append("foreground=\"%s\"" % mapped)
            if background_color is not None:
                mapped = coloring.get(background_color, background_color)
                span_attrs.append("background=\"%s\"" % mapped)
            if span_attrs:
                text_buffer = "<span %s>%s</span>" % (" ".join(span_attrs),
                                                      text_buffer)
            result += text_buffer
        return result
| |
'''
Configuration object
====================
The :class:`Config` object is an instance of a modified Python ConfigParser.
See the `ConfigParser documentation
<http://docs.python.org/library/configparser.html>`_ for more information.
Kivy has a configuration file which determines the default settings. In
order to change these settings, you can alter this file manually or use
the Config object. Please see the :ref:`Configure Kivy` section for more
information.
Applying configurations
-----------------------
Configuration options control the initialization of the :class:`~kivy.app.App`.
In order to avoid situations where the config settings do not work or are not
applied before window creation (like setting an initial window size),
:meth:`Config.set <kivy.config.ConfigParser.set>` should be used before
importing any other Kivy modules. Ideally, this means setting them right at
the start of your main.py script.
Alternatively, you can save these settings permanently using
:meth:`Config.set <ConfigParser.set>` then
:meth:`Config.write <ConfigParser.write>`. In this case, you will need to
restart the app for the changes to take effect. Note that this approach will
affect all Kivy apps system wide.
Please note that no underscores (`_`) are allowed in the section name.
Usage of the Config object
--------------------------
To read a configuration token from a particular section::
>>> from kivy.config import Config
>>> Config.getint('kivy', 'show_fps')
0
Change the configuration and save it::
>>> Config.set('postproc', 'retain_time', '50')
>>> Config.write()
For information on configuring your :class:`~kivy.app.App`, please see the
:ref:`Application configuration` section.
.. versionchanged:: 1.7.1
The ConfigParser should work correctly with utf-8 now. The values are
converted from ascii to unicode only when needed. The method get() returns
utf-8 strings.
Changing configuration with environment variables
-------------------------------------------------
Since 1.11.0, it is now possible to change the configuration using
environment variables. They take precedence on the loaded config.ini.
The format is::
KCFG_<section>_<key> = <value>
For example:
KCFG_GRAPHICS_FULLSCREEN=auto ...
KCFG_KIVY_LOG_LEVEL=warning ...
Or in your file before any kivy import:
import os
os.environ["KCFG_KIVY_LOG_LEVEL"] = "warning"
If you don't want to map any environment variables, you can disable
the behavior::
os.environ["KIVY_NO_ENV_CONFIG"] = "1"
.. _configuration-tokens:
Available configuration tokens
------------------------------
.. |log_levels| replace::
'trace', 'debug', 'info', 'warning', 'error' or 'critical'
:kivy:
`default_font`: list
Default fonts used for widgets displaying any text. It defaults to
['Roboto', 'data/fonts/Roboto-Regular.ttf',
'data/fonts/Roboto-Italic.ttf', 'data/fonts/Roboto-Bold.ttf',
'data/fonts/Roboto-BoldItalic.ttf'].
`desktop`: int, 0 or 1
This option controls desktop OS specific features, such as enabling
drag-able scroll-bar in scroll views, disabling of bubbles in
TextInput etc. 0 is disabled, 1 is enabled.
`exit_on_escape`: int, 0 or 1
Enables exiting kivy when escape is pressed.
0 is disabled, 1 is enabled.
`pause_on_minimize`: int, 0 or 1
If set to `1`, the main loop is paused and the `on_pause` event
is dispatched when the window is minimized. This option is intended
for desktop use only. Defaults to `0`.
`keyboard_layout`: string
Identifier of the layout to use.
`keyboard_mode`: string
        Specifies the keyboard mode to use. It can be one of the following:
* '' - Let Kivy choose the best option for your current platform.
* 'system' - real keyboard.
* 'dock' - one virtual keyboard docked to a screen side.
* 'multi' - one virtual keyboard for every widget request.
* 'systemanddock' - virtual docked keyboard plus input from real
keyboard.
* 'systemandmulti' - analogous.
`kivy_clock`: one of `default`, `interrupt`, `free_all`, `free_only`
The clock type to use with kivy. See :mod:`kivy.clock`.
`log_dir`: string
Path of log directory.
`log_enable`: int, 0 or 1
Activate file logging. 0 is disabled, 1 is enabled.
`log_level`: string, one of |log_levels|
Set the minimum log level to use.
`log_name`: string
Format string to use for the filename of log file.
`log_maxfiles`: int
Keep log_maxfiles recent logfiles while purging the log directory. Set
'log_maxfiles' to -1 to disable logfile purging (eg keep all logfiles).
.. note::
You end up with 'log_maxfiles + 1' logfiles because the logger
adds a new one after purging.
`window_icon`: string
Path of the window icon. Use this if you want to replace the default
pygame icon.
:postproc:
`double_tap_distance`: float
Maximum distance allowed for a double tap, normalized inside the range
0 - 1000.
`double_tap_time`: int
Time allowed for the detection of double tap, in milliseconds.
`ignore`: list of tuples
List of regions where new touches are ignored.
This configuration token can be used to resolve hotspot problems
with DIY hardware. The format of the list must be::
ignore = [(xmin, ymin, xmax, ymax), ...]
All the values must be inside the range 0 - 1.
`jitter_distance`: int
Maximum distance for jitter detection, normalized inside the range 0
- 1000.
`jitter_ignore_devices`: string, separated with commas
List of devices to ignore from jitter detection.
`retain_distance`: int
If the touch moves more than is indicated by retain_distance, it will
not be retained. Argument should be an int between 0 and 1000.
`retain_time`: int
Time allowed for a retain touch, in milliseconds.
`triple_tap_distance`: float
Maximum distance allowed for a triple tap, normalized inside the range
0 - 1000.
`triple_tap_time`: int
Time allowed for the detection of triple tap, in milliseconds.
:graphics:
`borderless`: int , one of 0 or 1
If set to `1`, removes the window border/decoration. Window resizing
must also be disabled to hide the resizing border.
`window_state`: string , one of 'visible', 'hidden', 'maximized'
or 'minimized'
Sets the window state, defaults to 'visible'. This option is available
only for the SDL2 window provider and it should be used on desktop
OSes.
`fbo`: string, one of 'hardware', 'software' or 'force-hardware'
Selects the FBO backend to use.
`fullscreen`: int or string, one of 0, 1, 'fake' or 'auto'
Activate fullscreen. If set to `1`, a resolution of `width`
times `height` pixels will be used.
If set to `auto`, your current display's resolution will be
used instead. This is most likely what you want.
If you want to place the window in another display,
use `fake`, or set the `borderless` option from the graphics section,
then adjust `width`, `height`, `top` and `left`.
`height`: int
Height of the :class:`~kivy.core.window.Window`, not used if
`fullscreen` is set to `auto`.
`left`: int
Left position of the :class:`~kivy.core.window.Window`.
`maxfps`: int, defaults to 60
Maximum FPS allowed.
.. warning::
Setting maxfps to 0 will lead to max CPU usage.
'multisamples': int, defaults to 2
Sets the `MultiSample Anti-Aliasing (MSAA)
<http://en.wikipedia.org/wiki/Multisample_anti-aliasing>`_ level.
Increasing this value results in smoother graphics but at the cost of
processing time.
.. note::
This feature is limited by device hardware support and will have no
effect on devices which do not support the level of MSAA requested.
`position`: string, one of 'auto' or 'custom'
Position of the window on your display. If `auto` is used, you have no
control of the initial position: `top` and `left` are ignored.
`show_cursor`: int, one of 0 or 1
Set whether or not the cursor is shown on the window.
`top`: int
Top position of the :class:`~kivy.core.window.Window`.
`resizable`: int, one of 0 or 1
If 0, the window will have a fixed size. If 1, the window will be
resizable.
`rotation`: int, one of 0, 90, 180 or 270
Rotation of the :class:`~kivy.core.window.Window`.
`width`: int
Width of the :class:`~kivy.core.window.Window`, not used if
`fullscreen` is set to `auto`.
`minimum_width`: int
Minimum width to restrict the window to. (sdl2 only)
`minimum_height`: int
Minimum height to restrict the window to. (sdl2 only)
`min_state_time`: float, defaults to .035
Minimum time for widgets to display a given visual state.
This attrib is currently used by widgets like
:class:`~kivy.uix.dropdown.DropDown` &
:class:`~kivy.uix.behaviors.buttonbehavior.ButtonBehavior` to
make sure they display their current visual state for the given
time.
`allow_screensaver`: int, one of 0 or 1, defaults to 1
Allow the device to show a screen saver, or to go to sleep
on mobile devices. Only works for the sdl2 window provider.
:input:
You can create new input devices using this syntax::
# example of input provider instance
yourid = providerid,parameters
# example for tuio provider
default = tuio,127.0.0.1:3333
mytable = tuio,192.168.0.1:3334
.. seealso::
Check the providers in :mod:`kivy.input.providers` for the syntax to
use inside the configuration file.
:widgets:
`scroll_distance`: int
Default value of the
:attr:`~kivy.uix.scrollview.ScrollView.scroll_distance`
property used by the :class:`~kivy.uix.scrollview.ScrollView` widget.
Check the widget documentation for more information.
`scroll_friction`: float
Default value of the
:attr:`~kivy.uix.scrollview.ScrollView.scroll_friction`
property used by the :class:`~kivy.uix.scrollview.ScrollView` widget.
Check the widget documentation for more information.
.. deprecated:: 1.7.0
Please use
:class:`~kivy.uix.scrollview.ScrollView.effect_cls` instead.
`scroll_timeout`: int
Default value of the
:attr:`~kivy.uix.scrollview.ScrollView.scroll_timeout`
property used by the :class:`~kivy.uix.scrollview.ScrollView` widget.
Check the widget documentation for more information.
`scroll_stoptime`: int
Default value of the
:attr:`~kivy.uix.scrollview.ScrollView.scroll_stoptime`
property used by the :class:`~kivy.uix.scrollview.ScrollView` widget.
Check the widget documentation for more information.
.. deprecated:: 1.7.0
Please use
:class:`~kivy.uix.scrollview.ScrollView.effect_cls` instead.
`scroll_moves`: int
Default value of the
:attr:`~kivy.uix.scrollview.ScrollView.scroll_moves`
property used by the :class:`~kivy.uix.scrollview.ScrollView` widget.
Check the widget documentation for more information.
.. deprecated:: 1.7.0
Please use
:class:`~kivy.uix.scrollview.ScrollView.effect_cls` instead.
:modules:
You can activate modules with this syntax::
modulename =
Anything after the = will be passed to the module as arguments.
Check the specific module's documentation for a list of accepted
arguments.
.. versionchanged:: 1.10.0
`min_state_time` and `allow_screensaver` have been added
to the `graphics` section.
`kivy_clock` has been added to the kivy section.
    `default_font` has been added to the kivy section.
.. versionchanged:: 1.9.0
`borderless` and `window_state` have been added to the graphics section.
The `fake` setting of the `fullscreen` option has been deprecated,
use the `borderless` option instead.
`pause_on_minimize` has been added to the kivy section.
.. versionchanged:: 1.8.0
    `systemanddock` and `systemandmulti` have been added as possible values for
`keyboard_mode` in the kivy section. `exit_on_escape` has been added
to the kivy section.
.. versionchanged:: 1.2.0
`resizable` has been added to graphics section.
.. versionchanged:: 1.1.0
tuio no longer listens by default. Window icons are not copied to
user directory anymore. You can still set a new window icon by using the
``window_icon`` config setting.
.. versionchanged:: 1.0.8
`scroll_timeout`, `scroll_distance` and `scroll_friction` have been added.
`list_friction`, `list_trigger_distance` and `list_friction_bound`
have been removed. `keyboard_type` and `keyboard_layout` have been
removed from the widget. `keyboard_mode` and `keyboard_layout` have
been added to the kivy section.
'''
__all__ = ('Config', 'ConfigParser')
try:
from ConfigParser import ConfigParser as PythonConfigParser
except ImportError:
from configparser import RawConfigParser as PythonConfigParser
from os import environ
from os.path import exists
from kivy import kivy_config_fn
from kivy.logger import Logger, logger_config_update
from collections import OrderedDict
from kivy.utils import platform
from kivy.compat import PY2, string_types
from weakref import ref
_is_rpi = exists('/opt/vc/include/bcm_host.h')
# Version number of current configuration format
KIVY_CONFIG_VERSION = 21
Config = None
'''The default Kivy configuration object. This is a :class:`ConfigParser`
instance with the :attr:`~kivy.config.ConfigParser.name` set to 'kivy'.
.. code-block:: python
Config = ConfigParser(name='kivy')
'''
class ConfigParser(PythonConfigParser, object):
'''Enhanced ConfigParser class that supports the addition of default
sections and default values.
By default, the kivy ConfigParser instance, :attr:`~kivy.config.Config`,
is named `'kivy'` and the ConfigParser instance used by the
:meth:`App.build_settings <~kivy.app.App.build_settings>` method is named
`'app'`.
:Parameters:
`name`: string
The name of the instance. See :attr:`name`. Defaults to `''`.
.. versionchanged:: 1.9.0
Each ConfigParser can now be :attr:`named <name>`. You can get the
ConfigParser associated with a name using :meth:`get_configparser`.
In addition, you can now control the config values with
:class:`~kivy.properties.ConfigParserProperty`.
.. versionadded:: 1.0.7
'''
    def __init__(self, name='', **kwargs):
        '''Initialize an empty, named parser with no file attached and no
        change callbacks registered.
        '''
        PythonConfigParser.__init__(self, **kwargs)
        # Ordered mapping so sections are written back in insertion order.
        self._sections = OrderedDict()
        # Last file passed to read(); write() targets this path.
        self.filename = None
        # (callback, section, key) triples registered via add_callback().
        self._callbacks = []
        # NOTE(review): `name` may be a property defined elsewhere in this
        # class; assignment order is kept as-is in case the setter has side
        # effects -- confirm against the full class definition.
        self.name = name
def add_callback(self, callback, section=None, key=None):
'''Add a callback to be called when a specific section or key has
changed. If you don't specify a section or key, it will call the
callback for all section/key changes.
Callbacks will receive 3 arguments: the section, key and value.
.. versionadded:: 1.4.1
'''
if section is None and key is not None:
raise Exception('You cannot specify a key without a section')
self._callbacks.append((callback, section, key))
    def remove_callback(self, callback, section=None, key=None):
        '''Removes a callback added with :meth:`add_callback`.
        :meth:`remove_callback` must be called with the same parameters as
        :meth:`add_callback`.
        Raises a `ValueError` if not found.
        .. versionadded:: 1.9.0
        '''
        # list.remove raises ValueError when this exact triple was never
        # registered, which satisfies the documented contract.
        self._callbacks.remove((callback, section, key))
def _do_callbacks(self, section, key, value):
for callback, csection, ckey in self._callbacks:
if csection is not None and csection != section:
continue
elif ckey is not None and ckey != key:
continue
callback(section, key, value)
    def read(self, filename):
        '''Read only one filename. In contrast to the original ConfigParser of
        Python, this one is able to read only one file at a time. The last
        read file will be used for the :meth:`write` method.
        .. versionchanged:: 1.9.0
            :meth:`read` now calls the callbacks if read changed any values.
        '''
        if not isinstance(filename, string_types):
            # NOTE(review): string_types.__name__ assumes a single type; if
            # string_types is a tuple (classic py2 compat shim) this would
            # raise AttributeError instead -- confirm against kivy.compat.
            raise Exception('Only one filename is accepted ({})'.format(
                string_types.__name__))
        self.filename = filename
        # If we try to open directly the configuration file in utf-8,
        # we correctly get the unicode value by default.
        # But, when we try to save it again, all the values we didn't change
        # are still unicode, and then the PythonConfigParser internals do
        # a str() conversion -> fail.
        # Instead we currently do the conversion to utf-8 when values are
        # "get()", but we internally store them in ascii.
        # with codecs.open(filename, 'r', encoding='utf-8') as f:
        #     self.readfp(f)
        # Snapshot the current values so that, after the read, callbacks can
        # be fired only for keys that are new or whose value changed.
        old_vals = {sect: {k: v for k, v in self.items(sect)} for sect in
                    self.sections()}
        PythonConfigParser.read(self, filename)
        # when reading new file, sections/keys are only increased, not removed
        f = self._do_callbacks
        for section in self.sections():
            if section not in old_vals:  # new section
                for k, v in self.items(section):
                    f(section, k, v)
                continue
            old_keys = old_vals[section]
            for k, v in self.items(section):  # just update new/changed keys
                if k not in old_keys or v != old_keys[k]:
                    f(section, k, v)
    def set(self, section, option, value):
        '''Functions similarly to PythonConfigParser's set method, except that
        the value is implicitly converted to a string.
        '''
        e_value = value
        if not isinstance(value, string_types):
            # might be boolean, int, etc.
            e_value = str(value)
        if PY2:
            # Store utf-8 bytes internally under Python 2; get() decodes back.
            if isinstance(value, unicode):
                e_value = value.encode('utf-8')
        ret = PythonConfigParser.set(self, section, option, e_value)
        # Notify watchers with the ORIGINAL (unconverted) value.
        self._do_callbacks(section, option, value)
        return ret
def setall(self, section, keyvalues):
'''Sets multiple key-value pairs in a section. keyvalues should be a
dictionary containing the key-value pairs to be set.
'''
for key, value in keyvalues.items():
self.set(section, key, value)
def get(self, section, option, **kwargs):
value = PythonConfigParser.get(self, section, option, **kwargs)
if PY2:
if type(value) is str:
return value.decode('utf-8')
return value
def setdefaults(self, section, keyvalues):
'''Set multiple key-value defaults in a section. keyvalues should be
a dictionary containing the new key-value defaults.
'''
self.adddefaultsection(section)
for key, value in keyvalues.items():
self.setdefault(section, key, value)
def setdefault(self, section, option, value):
'''Set the default value for an option in the specified section.
'''
if self.has_option(section, option):
return
self.set(section, option, value)
def getdefault(self, section, option, defaultvalue):
'''Get the value of an option in the specified section. If not found,
it will return the default value.
'''
if not self.has_section(section):
return defaultvalue
if not self.has_option(section, option):
return defaultvalue
return self.get(section, option)
def getdefaultint(self, section, option, defaultvalue):
'''Get the value of an option in the specified section. If not found,
it will return the default value. The value will always be
returned as an integer.
.. versionadded:: 1.6.0
'''
return int(self.getdefault(section, option, defaultvalue))
def adddefaultsection(self, section):
'''Add a section if the section is missing.
'''
assert("_" not in section)
if self.has_section(section):
return
self.add_section(section)
def write(self):
'''Write the configuration to the last file opened using the
:meth:`read` method.
Return True if the write finished successfully, False otherwise.
'''
if self.filename is None:
return False
try:
with open(self.filename, 'w') as fd:
PythonConfigParser.write(self, fd)
except IOError:
Logger.exception('Unable to write the config <%s>' % self.filename)
return False
return True
def update_config(self, filename, overwrite=False):
'''Upgrade the configuration based on a new default config file.
Overwrite any existing values if overwrite is True.
'''
pcp = PythonConfigParser()
pcp.read(filename)
confset = self.setall if overwrite else self.setdefaults
for section in pcp.sections():
confset(section, dict(pcp.items(section)))
self.write()
    @staticmethod
    def _register_named_property(name, widget_ref, *largs):
        ''' Called by the ConfigParserProperty to register a property which
        was created with a config name instead of a config object.

        When a ConfigParser with this name is later created, the properties
        are then notified that this parser now exists so they can use it.
        If the parser already exists, the property is notified here. See
        :meth:`~kivy.properties.ConfigParserProperty.set_config`.

        :Parameters:

            `name`: a non-empty string
                The name of the ConfigParser that is associated with the
                property. See :attr:`name`.
            `widget_ref`: 2-tuple.
                The first element is a reference to the widget containing the
                property, the second element is the name of the property. E.g.:

                    class House(Widget):
                        address = ConfigParserProperty('', 'info', 'street',
                                                       'directory')

                Then, the first element is a ref to a House instance, and the
                second is `'address'`.
        '''
        configs = ConfigParser._named_configs
        try:
            config, props = configs[name]
        except KeyError:
            # First registration under this name and no parser exists yet:
            # record the property so it can be connected later.
            configs[name] = (None, [widget_ref])
            return
        props.append(widget_ref)
        if config:
            # Stored value is a weakref to the parser; dereference it.
            config = config()
        widget = widget_ref[0]()
        if config and widget:  # associate this config with property
            widget.property(widget_ref[1]).set_config(config)
    @staticmethod
    def get_configparser(name):
        '''Returns the :class:`ConfigParser` instance whose name is `name`, or
        None if not found.

        :Parameters:

            `name`: string
                The name of the :class:`ConfigParser` instance to return.
        '''
        try:
            config = ConfigParser._named_configs[name][0]
            if config is not None:
                # The entry holds a weakref; return the parser if alive.
                config = config()
                if config is not None:
                    return config
            # Parser was never set or has been garbage collected: drop the
            # stale entry and fall through (implicitly returning None).
            del ConfigParser._named_configs[name]
        except KeyError:
            return None
# keys are configparser names, values are 2-tuple of (ref(configparser),
# widget_ref), where widget_ref is same as in _register_named_property
_named_configs = {}
_name = ''
    @property
    def name(self):
        ''' The name associated with this ConfigParser instance, if not `''`.
        Defaults to `''`. It can be safely changed dynamically or set to `''`.

        When a ConfigParser is given a name, that config object can be
        retrieved using :meth:`get_configparser`. In addition, that config
        instance can also be used with a
        :class:`~kivy.properties.ConfigParserProperty` instance that set its
        `config` value to this name.

        Setting more than one ConfigParser with the same name will raise a
        `ValueError`.
        '''
        return self._name
    @name.setter
    def name(self, value):
        old_name = self._name
        # NOTE(review): identity (`is`) comparison — distinct-but-equal
        # strings fall through to the full disconnect/reconnect below;
        # confirm this is intended rather than `==`.
        if value is old_name:
            return
        self._name = value
        configs = ConfigParser._named_configs
        if old_name:  # disconnect this parser from previously connected props
            _, props = configs.get(old_name, (None, []))
            for widget, prop in props:
                # widget is stored as a weakref; skip dead widgets.
                widget = widget()
                if widget:
                    widget.property(prop).set_config(None)
            # Keep the property list so a future parser with this name can
            # reconnect, but clear the parser reference.
            configs[old_name] = (None, props)
        if not value:
            return
        # if given new name, connect it with property that used this name
        try:
            config, props = configs[value]
        except KeyError:
            configs[value] = (ref(self), [])
            return
        if config is not None and config() is not None:
            raise ValueError('A parser named {} already exists'.format(value))
        for widget, prop in props:
            widget = widget()
            if widget:
                widget.property(prop).set_config(self)
        configs[value] = (ref(self), props)
# Module-level bootstrap: build the global `Config`, read the user's config
# file, upgrade it to the current KIVY_CONFIG_VERSION, and apply KCFG_*
# environment overrides. Skipped entirely when building the documentation.
# Fixes: the "error while reading local configuration" log message was
# missing a space between its concatenated parts, and two `except` clauses
# bound an unused `e`.
if not environ.get('KIVY_DOC_INCLUDE'):
    #
    # Read, analyse configuration file
    # Support upgrade of older config file versions
    #

    # Create default configuration
    Config = ConfigParser(name='kivy')
    Config.add_callback(logger_config_update, 'kivy', 'log_level')

    # Read config file if exist
    if (exists(kivy_config_fn) and
            'KIVY_USE_DEFAULTCONFIG' not in environ and
            'KIVY_NO_CONFIG' not in environ):
        try:
            Config.read(kivy_config_fn)
        except Exception:
            # A broken config file is not fatal: defaults are applied below.
            Logger.exception('Core: error while reading local '
                             'configuration')

    version = Config.getdefaultint('kivy', 'config_version', 0)

    # Add defaults section
    Config.adddefaultsection('kivy')
    Config.adddefaultsection('graphics')
    Config.adddefaultsection('input')
    Config.adddefaultsection('postproc')
    Config.adddefaultsection('widgets')
    Config.adddefaultsection('modules')
    Config.adddefaultsection('network')

    # Upgrade default configuration until we have the current version
    need_save = False
    if version != KIVY_CONFIG_VERSION and 'KIVY_NO_CONFIG' not in environ:
        Logger.warning('Config: Older configuration version detected'
                       ' ({0} instead of {1})'.format(
                           version, KIVY_CONFIG_VERSION))
        Logger.warning('Config: Upgrading configuration in progress.')
        need_save = True

    # Each step migrates the config from `version` to `version + 1`.
    while version < KIVY_CONFIG_VERSION:
        Logger.debug('Config: Upgrading from %d to %d' %
                     (version, version + 1))
        if version == 0:
            # log level
            Config.setdefault('kivy', 'keyboard_repeat_delay', '300')
            Config.setdefault('kivy', 'keyboard_repeat_rate', '30')
            Config.setdefault('kivy', 'log_dir', 'logs')
            Config.setdefault('kivy', 'log_enable', '1')
            Config.setdefault('kivy', 'log_level', 'info')
            Config.setdefault('kivy', 'log_name', 'kivy_%y-%m-%d_%_.txt')
            Config.setdefault('kivy', 'window_icon', '')
            # default graphics parameters
            Config.setdefault('graphics', 'display', '-1')
            Config.setdefault('graphics', 'fullscreen', 'no')
            Config.setdefault('graphics', 'height', '600')
            Config.setdefault('graphics', 'left', '0')
            Config.setdefault('graphics', 'maxfps', '0')
            Config.setdefault('graphics', 'multisamples', '2')
            Config.setdefault('graphics', 'position', 'auto')
            Config.setdefault('graphics', 'rotation', '0')
            Config.setdefault('graphics', 'show_cursor', '1')
            Config.setdefault('graphics', 'top', '0')
            Config.setdefault('graphics', 'vsync', '1')
            Config.setdefault('graphics', 'width', '800')
            # input configuration
            Config.setdefault('input', 'mouse', 'mouse')
            # activate native input provider in configuration
            # from 1.0.9, don't activate mactouch by default, or app are
            # unusable.
            if platform == 'win':
                Config.setdefault('input', 'wm_touch', 'wm_touch')
                Config.setdefault('input', 'wm_pen', 'wm_pen')
            elif platform == 'linux':
                probesysfs = 'probesysfs'
                if _is_rpi:
                    probesysfs += ',provider=hidinput'
                Config.setdefault('input', '%(name)s', probesysfs)
            # input postprocessing configuration
            Config.setdefault('postproc', 'double_tap_distance', '20')
            Config.setdefault('postproc', 'double_tap_time', '250')
            Config.setdefault('postproc', 'ignore', '[]')
            Config.setdefault('postproc', 'jitter_distance', '0')
            Config.setdefault('postproc', 'jitter_ignore_devices',
                              'mouse,mactouch,')
            Config.setdefault('postproc', 'retain_distance', '50')
            Config.setdefault('postproc', 'retain_time', '0')
            # default configuration for keyboard repetition
            Config.setdefault('widgets', 'keyboard_layout', 'qwerty')
            Config.setdefault('widgets', 'keyboard_type', '')
            Config.setdefault('widgets', 'list_friction', '10')
            Config.setdefault('widgets', 'list_friction_bound', '20')
            Config.setdefault('widgets', 'list_trigger_distance', '5')
        elif version == 1:
            Config.remove_option('graphics', 'vsync')
            Config.set('graphics', 'maxfps', '60')
        elif version == 2:
            # was a version to automatically copy windows icon in the user
            # directory, but it's now not used anymore. User can still change
            # the window icon by touching the config.
            pass
        elif version == 3:
            # add token for scrollview
            Config.setdefault('widgets', 'scroll_timeout', '55')
            Config.setdefault('widgets', 'scroll_distance', '20')
            Config.setdefault('widgets', 'scroll_friction', '1.')
            # remove old list_* token
            Config.remove_option('widgets', 'list_friction')
            Config.remove_option('widgets', 'list_friction_bound')
            Config.remove_option('widgets', 'list_trigger_distance')
        elif version == 4:
            Config.remove_option('widgets', 'keyboard_type')
            Config.remove_option('widgets', 'keyboard_layout')
            # add keyboard token
            Config.setdefault('kivy', 'keyboard_mode', '')
            Config.setdefault('kivy', 'keyboard_layout', 'qwerty')
        elif version == 5:
            Config.setdefault('graphics', 'resizable', '1')
        elif version == 6:
            # if the timeout is still the default value, change it
            Config.setdefault('widgets', 'scroll_stoptime', '300')
            Config.setdefault('widgets', 'scroll_moves', '5')
        elif version == 7:
            # desktop bool indicating whether to use desktop specific features
            is_desktop = int(platform in ('win', 'macosx', 'linux'))
            Config.setdefault('kivy', 'desktop', is_desktop)
            Config.setdefault('postproc', 'triple_tap_distance', '20')
            Config.setdefault('postproc', 'triple_tap_time', '375')
        elif version == 8:
            if Config.getint('widgets', 'scroll_timeout') == 55:
                Config.set('widgets', 'scroll_timeout', '250')
        elif version == 9:
            Config.setdefault('kivy', 'exit_on_escape', '1')
        elif version == 10:
            Config.set('graphics', 'fullscreen', '0')
            Config.setdefault('graphics', 'borderless', '0')
        elif version == 11:
            Config.setdefault('kivy', 'pause_on_minimize', '0')
        elif version == 12:
            Config.setdefault('graphics', 'window_state', 'visible')
        elif version == 13:
            Config.setdefault('graphics', 'minimum_width', '0')
            Config.setdefault('graphics', 'minimum_height', '0')
        elif version == 14:
            Config.setdefault('graphics', 'min_state_time', '.035')
        elif version == 15:
            Config.setdefault('kivy', 'kivy_clock', 'default')
        elif version == 16:
            Config.setdefault('kivy', 'default_font', [
                'Roboto',
                'data/fonts/Roboto-Regular.ttf',
                'data/fonts/Roboto-Italic.ttf',
                'data/fonts/Roboto-Bold.ttf',
                'data/fonts/Roboto-BoldItalic.ttf'])
        elif version == 17:
            Config.setdefault('graphics', 'allow_screensaver', '1')
        elif version == 18:
            Config.setdefault('kivy', 'log_maxfiles', '100')
        elif version == 19:
            Config.setdefault('graphics', 'shaped', '0')
            Config.setdefault(
                'kivy', 'window_shape',
                'data/images/defaultshape.png'
            )
        elif version == 20:
            Config.setdefault('network', 'useragent', 'curl')
        else:
            # for future.
            break
        # Pass to the next version
        version += 1

    # Indicate to the Config that we've upgrade to the latest version.
    Config.set('kivy', 'config_version', KIVY_CONFIG_VERSION)

    # Now, activate log file
    Logger.logfile_activated = bool(Config.getint('kivy', 'log_enable'))

    # If no configuration exist, write the default one.
    if ((not exists(kivy_config_fn) or need_save) and
            'KIVY_NO_CONFIG' not in environ):
        try:
            Config.filename = kivy_config_fn
            Config.write()
        except Exception:
            Logger.exception('Core: Error while saving default config file')

    # Load configuration from env
    if environ.get('KIVY_NO_ENV_CONFIG', '0') != '1':
        for key, value in environ.items():
            if not key.startswith("KCFG_"):
                continue
            try:
                _, section, name = key.split("_", 2)
            except ValueError:
                Logger.warning((
                    "Config: Environ `{}` invalid format, "
                    "must be KCFG_section_name").format(key))
                continue
            # extract and check section
            section = section.lower()
            if not Config.has_section(section):
                Logger.warning(
                    "Config: Environ `{}`: unknown section `{}`".format(
                        key, section))
                continue
            # extract and check the option name
            name = name.lower()
            sections_to_check = {
                "kivy", "graphics", "widgets", "postproc", "network"}
            if (section in sections_to_check and
                    not Config.has_option(section, name)):
                Logger.warning((
                    "Config: Environ `{}` unknown `{}` "
                    "option in `{}` section.").format(
                        key, name, section))
                # we don't avoid to set an unknown option, because maybe
                # an external modules or widgets (in garden?) may want to
                # save its own configuration here.
            Config.set(section, name, value)
| |
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HttpClients in this module use httplib to make HTTP requests.
This module make HTTP requests based on httplib, but there are environments
in which an httplib based approach will not work (if running in Google App
Engine for example). In those cases, higher level classes (like AtomService
and GDataService) can swap out the HttpClient to transparently use a
different mechanism for making HTTP requests.
HttpClient: Contains a request method which performs an HTTP call to the
server.
ProxiedHttpClient: Contains a request method which connects to a proxy using
settings stored in operating system environment variables then
performs an HTTP call to the endpoint server.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import types
import os
import httplib
import atom.url
import atom.http_interface
import socket
import base64
class ProxyError(atom.http_interface.Error):
  """Raised when the proxy CONNECT handshake returns a non-200 status."""
  pass
DEFAULT_CONTENT_TYPE = 'application/atom+xml'
class HttpClient(atom.http_interface.GenericHttpClient):
  """Makes HTTP requests directly to the target server using httplib."""

  def __init__(self, headers=None):
    # When True, httplib's debuglevel is turned on for each connection.
    self.debug = False
    # Headers sent with every request; per-request headers are merged in.
    self.headers = headers or {}

  def request(self, operation, url, data=None, headers=None):
    """Performs an HTTP call to the server, supports GET, POST, PUT, and
    DELETE.

    Usage example, perform an HTTP GET on http://www.google.com/:
      import atom.http
      client = atom.http.HttpClient()
      http_response = client.request('GET', 'http://www.google.com/')

    Args:
      operation: str The HTTP operation to be performed. This is usually one
          of 'GET', 'POST', 'PUT', or 'DELETE'
      data: filestream, list of parts, or other object which can be converted
          to a string. Should be set to None when performing a GET or DELETE.
          If data is a file-like object which can be read, this method will
          read a chunk of 100K bytes at a time and send them.
          If the data is a list of parts to be sent, each part will be
          evaluated and sent.
      url: The full URL to which the request should be sent. Can be a string
          or atom.url.Url.
      headers: dict of strings. HTTP headers which should be sent
          in the request.

    Returns:
      The response object produced by the underlying connection's
      getresponse().
    """
    if not isinstance(url, atom.url.Url):
      if isinstance(url, types.StringType):
        url = atom.url.parse_url(url)
      else:
        raise atom.http_interface.UnparsableUrlObject('Unable to parse url '
            'parameter because it was not a string or atom.url.Url')
    # Merge per-request headers over the client-wide defaults.
    all_headers = self.headers.copy()
    if headers:
      all_headers.update(headers)
    connection = self._prepare_connection(url, all_headers)
    if self.debug:
      connection.debuglevel = 1
    connection.putrequest(operation, self._get_access_url(url))
    # If the list of headers does not include a Content-Length, attempt to
    # calculate it based on the data object.
    if data and 'Content-Length' not in all_headers:
      if isinstance(data, types.StringType):
        # NOTE(review): the length is stored as an int and passed straight to
        # putheader -- relies on httplib stringifying it; confirm intended.
        all_headers['Content-Length'] = len(data)
      else:
        raise atom.http_interface.ContentLengthRequired('Unable to calculate '
            'the length of the data parameter. Specify a value for '
            'Content-Length')
    # Set the content type to the default value if none was set.
    if 'Content-Type' not in all_headers:
      all_headers['Content-Type'] = DEFAULT_CONTENT_TYPE
    # Send the HTTP headers.
    for header_name in all_headers:
      connection.putheader(header_name, all_headers[header_name])
    connection.endheaders()
    # If there is data, send it in the request.
    if data:
      if isinstance(data, list):
        for data_part in data:
          _send_data_part(data_part, connection)
      else:
        _send_data_part(data, connection)
    # Return the HTTP Response from the server.
    return connection.getresponse()

  def _prepare_connection(self, url, headers):
    # Opens a plain or SSL connection to the host/port named in `url`.
    if not isinstance(url, atom.url.Url):
      if isinstance(url, types.StringType):
        url = atom.url.parse_url(url)
      else:
        raise atom.http_interface.UnparsableUrlObject('Unable to parse url '
            'parameter because it was not a string or atom.url.Url')
    if url.protocol == 'https':
      if not url.port:
        return httplib.HTTPSConnection(url.host)
      return httplib.HTTPSConnection(url.host, int(url.port))
    else:
      if not url.port:
        return httplib.HTTPConnection(url.host)
      return httplib.HTTPConnection(url.host, int(url.port))

  def _get_access_url(self, url):
    # Request-URI only (path + query); ProxiedHttpClient overrides this.
    return url.get_request_uri()
class ProxiedHttpClient(HttpClient):
  """Performs an HTTP request through a proxy.

  The proxy settings are obtained from environment variables. The URL of the
  proxy server is assumed to be stored in the environment variables
  'https_proxy' and 'http_proxy' respectively. If the proxy server requires
  a Basic Auth authorization header, the username and password are expected to
  be in the 'proxy-username' or 'proxy_username' variable and the
  'proxy-password' or 'proxy_password' variable.

  After connecting to the proxy server, the request is completed as in
  HttpClient.request.
  """

  def _prepare_connection(self, url, headers):
    # Returns a connection to the endpoint; when the relevant *_proxy
    # environment variable is set, the connection is tunnelled (https) or
    # pointed at the proxy host (http). Otherwise falls back to a direct
    # connection via the base class.
    proxy_auth = _get_proxy_auth()
    if url.protocol == 'https':
      # destination is https
      proxy = os.environ.get('https_proxy')
      if proxy:
        # Set any proxy auth headers
        if proxy_auth:
          proxy_auth = 'Proxy-authorization: %s' % proxy_auth
        # Construct the proxy connect command.
        port = url.port
        if not port:
          port = '443'
        proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (url.host, port)
        # Set the user agent to send to the proxy
        if headers and 'User-Agent' in headers:
          user_agent = 'User-Agent: %s\r\n' % (headers['User-Agent'])
        else:
          user_agent = ''
        # proxy_auth already ends with \r\n (see _get_proxy_auth), so this
        # terminates the CONNECT request with a blank line.
        proxy_pieces = '%s%s%s\r\n' % (proxy_connect, proxy_auth, user_agent)
        # Find the proxy host and port.
        proxy_url = atom.url.parse_url(proxy)
        if not proxy_url.port:
          proxy_url.port = '80'
        # Connect to the proxy server, very simple recv and error checking
        p_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        p_sock.connect((proxy_url.host, int(proxy_url.port)))
        p_sock.sendall(proxy_pieces)
        response = ''
        # Wait for the full response.
        while response.find("\r\n\r\n") == -1:
          response += p_sock.recv(8192)
        p_status = response.split()[1]
        if p_status != str(200):
          raise ProxyError('Error status=%s' % str(p_status))
        # Trivial setup for ssl socket.
        # NOTE(review): socket.ssl and httplib.FakeSocket exist only on old
        # Python 2 releases -- confirm the supported interpreter range.
        ssl = socket.ssl(p_sock, None, None)
        fake_sock = httplib.FakeSocket(p_sock, ssl)
        # Initalize httplib and replace with the proxy socket.
        connection = httplib.HTTPConnection(proxy_url.host)
        connection.sock=fake_sock
        return connection
      else:
        # The request was HTTPS, but there was no https_proxy set.
        return HttpClient._prepare_connection(self, url, headers)
    else:
      proxy = os.environ.get('http_proxy')
      if proxy:
        # Find the proxy host and port.
        proxy_url = atom.url.parse_url(proxy)
        if not proxy_url.port:
          proxy_url.port = '80'
        if proxy_auth:
          headers['Proxy-Authorization'] = proxy_auth.strip()
        return httplib.HTTPConnection(proxy_url.host, int(proxy_url.port))
      else:
        # The request was HTTP, but there was no http_proxy set.
        return HttpClient._prepare_connection(self, url, headers)

  def _get_access_url(self, url):
    # Plain-HTTP proxying requires the absolute URL in the request line;
    # tunnelled HTTPS uses the normal request URI.
    proxy = os.environ.get('http_proxy')
    if url.protocol == 'http' and proxy:
      return url.to_string()
    else:
      return url.get_request_uri()
def _get_proxy_auth():
  """Build a 'Basic ...' proxy authorization token from the environment.

  Reads proxy-username/proxy_username and proxy-password/proxy_password;
  returns '' when no username is configured. The returned string ends with
  '\\r\\n' so it can be embedded directly into a CONNECT request.
  """
  username = (os.environ.get('proxy-username') or
              os.environ.get('proxy_username'))
  password = (os.environ.get('proxy-password') or
              os.environ.get('proxy_password'))
  if not username:
    return ''
  user_auth = base64.encodestring('%s:%s' % (username, password))
  return 'Basic %s\r\n' % (user_auth.strip())
def _send_data_part(data, connection):
  """Send one part of a request body over *connection*.

  Strings are sent as-is, file-like objects are streamed in 100K chunks,
  and anything else is converted with str() first.
  """
  if isinstance(data, types.StringType):
    connection.send(data)
  elif hasattr(data, 'read'):
    # Stream the file-like object one chunk at a time.
    while True:
      chunk = data.read(100000)
      if chunk == '':
        break
      connection.send(chunk)
  else:
    # The data object was not a file; fall back to its string form.
    connection.send(str(data))
| |
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of an image service that uses Glance as the backend."""
from __future__ import absolute_import
import copy
import itertools
import random
import sys
import time
import cryptography
import glanceclient
from glanceclient.common import http
import glanceclient.exc
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_service import sslutils
from oslo_utils import excutils
from oslo_utils import netutils
from oslo_utils import timeutils
import six
from six.moves import range
import six.moves.urllib.parse as urlparse
from nova import exception
from nova.i18n import _LE, _LI, _LW
import nova.image.download as image_xfers
from nova import objects
from nova import signature_utils
# Configuration options for the [glance] section. host/port/protocol are
# deprecated in favor of fully-qualified 'api_servers' URLs.
glance_opts = [
    cfg.StrOpt('host',
               default='$my_ip',
               # TODO(sdague): remove in N
               deprecated_for_removal=True,
               help='Glance server hostname or IP address'),
    cfg.IntOpt('port',
               default=9292,
               min=1,
               max=65535,
               # TODO(sdague): remove in N
               deprecated_for_removal=True,
               help='Glance server port'),
    cfg.StrOpt('protocol',
               default='http',
               choices=('http', 'https'),
               # TODO(sdague): remove in N
               deprecated_for_removal=True,
               help='Protocol to use when connecting to glance. '
                    'Set to https for SSL.'),
    cfg.ListOpt('api_servers',
                help='''
A list of the glance api servers endpoints available to nova. These
should be fully qualified urls of the form
"scheme://hostname:port[/path]" (i.e. "http://10.0.1.0:9292" or
"https://my.glance.server/image")'''),
    cfg.BoolOpt('api_insecure',
                default=False,
                help='Allow to perform insecure SSL (https) requests to '
                     'glance'),
    cfg.IntOpt('num_retries',
               default=0,
               help='Number of retries when uploading / downloading an image '
                    'to / from glance.'),
    cfg.ListOpt('allowed_direct_url_schemes',
                default=[],
                help='A list of url scheme that can be downloaded directly '
                     'via the direct_url. Currently supported schemes: '
                     '[file].'),
    cfg.BoolOpt('verify_glance_signatures',
                default=False,
                help='Require Nova to perform signature verification on '
                     'each image downloaded from Glance.'),
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(glance_opts, 'glance')
CONF.import_opt('auth_strategy', 'nova.api.auth')
CONF.import_opt('my_ip', 'nova.netconf')
# Glance API major versions this module knows how to talk to.
supported_glance_versions = (1, 2)
def generate_glance_url():
    """Generate the URL to glance."""
    host = CONF.glance.host
    if netutils.is_valid_ipv6(host):
        # Bracket bare IPv6 addresses so the port can be appended.
        host = '[%s]' % host
    return "%s://%s:%d" % (CONF.glance.protocol, host, CONF.glance.port)
def generate_image_url(image_ref):
    """Generate an image URL from an image_ref."""
    base = generate_glance_url()
    return "%s/images/%s" % (base, image_ref)
def _endpoint_from_image_ref(image_href):
"""Return the image_ref and guessed endpoint from an image url.
:param image_href: href of an image
:returns: a tuple of the form (image_id, endpoint_url)
"""
parts = image_href.split('/')
image_id = parts[-1]
# the endpoint is everything in the url except the last 3 bits
# which are version, 'images', and image_id
endpoint = '/'.join(parts[:-3])
return (image_id, endpoint)
def generate_identity_headers(context, status='Confirmed'):
    """Build the X-* identity headers expected by glance from *context*.

    Missing context attributes default to None (or an empty role list).
    """
    roles = getattr(context, 'roles', [])
    return {
        'X-Auth-Token': getattr(context, 'auth_token', None),
        'X-User-Id': getattr(context, 'user', None),
        'X-Tenant-Id': getattr(context, 'tenant', None),
        'X-Roles': ','.join(roles),
        'X-Identity-Status': status,
    }
def _glanceclient_from_endpoint(context, endpoint, version=1):
    """Instantiate a new glanceclient.Client object."""
    # NOTE(sdague): even if we aren't using keystone, it doesn't
    # hurt to send these headers.
    params = {'identity_headers': generate_identity_headers(context)}
    if endpoint.startswith('https://'):
        # https specific params
        params['insecure'] = CONF.glance.api_insecure
        params['ssl_compression'] = False
        sslutils.is_enabled(CONF)
        if CONF.ssl.cert_file:
            params['cert_file'] = CONF.ssl.cert_file
        if CONF.ssl.key_file:
            params['key_file'] = CONF.ssl.key_file
        if CONF.ssl.ca_file:
            params['cacert'] = CONF.ssl.ca_file
    return glanceclient.Client(str(version), endpoint, **params)
def _determine_curr_major_version(endpoint):
    """Determines the current major version of the glance API in use

    :returns Integer version number or None if unable to determine version
    """
    http_client = http.HTTPClient(endpoint)
    try:
        _resp, content = http_client.get('/versions')
        current = next((v['id'] for v in content['versions']
                        if v['status'] == 'CURRENT'), None)
        if current is not None:
            # The 'id' value looks like "v2.2", so grab the major version
            # number which is 2 in this case.
            major = int(current[1:current.find(".")])
            if major in supported_glance_versions:
                return major
    except Exception:
        LOG.error(_LE("Unable to determine the glance API version"))
def get_api_servers():
    """Shuffle a list of CONF.glance.api_servers and return an iterator
    that will cycle through the list, looping around to the beginning
    if necessary.
    """
    configured = CONF.glance.api_servers
    if configured is None:
        configured = [generate_glance_url()]
    api_servers = []
    for server in configured:
        if '//' not in server:
            server = 'http://' + server
            # NOTE(sdague): remove in N.
            LOG.warn(
                _LW("No protocol specified in for api_server '%s', "
                    "please update [glance] api_servers with fully "
                    "qualified url including scheme (http / https)"),
                server)
        api_servers.append(server)
    random.shuffle(api_servers)
    return itertools.cycle(api_servers)
class GlanceClientWrapper(object):
    """Glance client wrapper class that implements retries."""

    def __init__(self, context=None, endpoint=None, version=1):
        # With an explicit endpoint a single static client is used for every
        # call; otherwise a fresh client is created per call, rotating
        # through the configured api_servers.
        if endpoint is not None:
            self.client = self._create_static_client(context,
                                                     endpoint,
                                                     version)
        else:
            self.client = None
        self.api_servers = None

    def _create_static_client(self, context, endpoint, version):
        """Create a client that we'll use for every call."""
        self.api_server = str(endpoint)
        return _glanceclient_from_endpoint(context, endpoint, version)

    def _create_onetime_client(self, context, version):
        """Create a client that will be used for one call."""
        if self.api_servers is None:
            # Lazily build the shuffled, endlessly-cycling server iterator.
            self.api_servers = get_api_servers()
        self.api_server = next(self.api_servers)
        return _glanceclient_from_endpoint(context, self.api_server, version)

    def call(self, context, version, method, *args, **kwargs):
        """Call a glance client method. If we get a connection error,
        retry the request according to CONF.glance.num_retries.
        """
        retry_excs = (glanceclient.exc.ServiceUnavailable,
                      glanceclient.exc.InvalidEndpoint,
                      glanceclient.exc.CommunicationError)
        retries = CONF.glance.num_retries
        if retries < 0:
            LOG.warning(_LW("Treating negative config value (%(retries)s) for "
                            "'glance.num_retries' as 0."),
                        {'retries': retries})
            retries = 0
        num_attempts = retries + 1
        for attempt in range(1, num_attempts + 1):
            # A static client (if any) wins; otherwise pick the next server.
            client = self.client or self._create_onetime_client(context,
                                                                version)
            try:
                return getattr(client.images, method)(*args, **kwargs)
            except retry_excs as e:
                if attempt < num_attempts:
                    extra = "retrying"
                else:
                    extra = 'done trying'
                LOG.exception(_LE("Error contacting glance server "
                                  "'%(server)s' for '%(method)s', "
                                  "%(extra)s."),
                              {'server': self.api_server,
                               'method': method, 'extra': extra})
                if attempt == num_attempts:
                    raise exception.GlanceConnectionFailed(
                        server=str(self.api_server), reason=six.text_type(e))
                # Back off briefly before the next attempt.
                time.sleep(1)
class GlanceImageService(object):
"""Provides storage and retrieval of disk image objects within Glance."""
def __init__(self, client=None):
self._client = client or GlanceClientWrapper()
# NOTE(jbresnah) build the table of download handlers at the beginning
# so that operators can catch errors at load time rather than whenever
# a user attempts to use a module. Note this cannot be done in glance
# space when this python module is loaded because the download module
# may require configuration options to be parsed.
self._download_handlers = {}
download_modules = image_xfers.load_transfer_modules()
for scheme, mod in six.iteritems(download_modules):
if scheme not in CONF.glance.allowed_direct_url_schemes:
continue
try:
self._download_handlers[scheme] = mod.get_download_handler()
except Exception as ex:
LOG.error(_LE('When loading the module %(module_str)s the '
'following error occurred: %(ex)s'),
{'module_str': str(mod), 'ex': ex})
def detail(self, context, **kwargs):
"""Calls out to Glance for a list of detailed image information."""
params = _extract_query_params(kwargs)
try:
images = self._client.call(context, 1, 'list', **params)
except Exception:
_reraise_translated_exception()
_images = []
for image in images:
if _is_image_available(context, image):
_images.append(_translate_from_glance(image))
return _images
def show(self, context, image_id, include_locations=False,
show_deleted=True):
"""Returns a dict with image data for the given opaque image id.
:param context: The context object to pass to image client
:param image_id: The UUID of the image
:param include_locations: (Optional) include locations in the returned
dict of information if the image service API
supports it. If the image service API does
not support the locations attribute, it will
still be included in the returned dict, as an
empty list.
:param show_deleted: (Optional) show the image even the status of
image is deleted.
"""
version = 1
if include_locations:
version = 2
try:
image = self._client.call(context, version, 'get', image_id)
except Exception:
_reraise_translated_image_exception(image_id)
if not show_deleted and getattr(image, 'deleted', False):
raise exception.ImageNotFound(image_id=image_id)
if not _is_image_available(context, image):
raise exception.ImageNotFound(image_id=image_id)
image = _translate_from_glance(image,
include_locations=include_locations)
if include_locations:
locations = image.get('locations', None) or []
du = image.get('direct_url', None)
if du:
locations.append({'url': du, 'metadata': {}})
image['locations'] = locations
return image
def _get_transfer_module(self, scheme):
try:
return self._download_handlers[scheme]
except KeyError:
return None
except Exception:
LOG.error(_LE("Failed to instantiate the download handler "
"for %(scheme)s"), {'scheme': scheme})
return
    def download(self, context, image_id, data=None, dst_path=None):
        """Calls out to Glance for data and writes data.

        :param data: optional already-open file-like sink for the image bytes
        :param dst_path: optional filesystem path to write the image to when
                         no *data* sink is supplied
        :returns: the raw chunk iterator when neither sink is given,
                  otherwise None
        """
        # Fast path: if direct-URL transfer is configured and we are writing
        # to a local file, try each registered per-scheme handler first.
        if CONF.glance.allowed_direct_url_schemes and dst_path is not None:
            image = self.show(context, image_id, include_locations=True)
            for entry in image.get('locations', []):
                loc_url = entry['url']
                loc_meta = entry['metadata']
                o = urlparse.urlparse(loc_url)
                xfer_mod = self._get_transfer_module(o.scheme)
                if xfer_mod:
                    try:
                        xfer_mod.download(context, o, dst_path, loc_meta)
                        LOG.info(_LI("Successfully transferred "
                                     "using %s"), o.scheme)
                        return
                    except Exception:
                        # Best-effort: fall through to the regular Glance
                        # download below.
                        LOG.exception(_LE("Download image error"))
        try:
            image_chunks = self._client.call(context, 1, 'data', image_id)
        except Exception:
            _reraise_translated_image_exception(image_id)
        # Retrieve properties for verification of Glance image signature
        verifier = None
        if CONF.glance.verify_glance_signatures:
            image_meta_dict = self.show(context, image_id,
                                        include_locations=False)
            image_meta = objects.ImageMeta.from_dict(image_meta_dict)
            img_signature = image_meta.properties.get('img_signature')
            img_sig_hash_method = image_meta.properties.get(
                'img_signature_hash_method'
            )
            img_sig_cert_uuid = image_meta.properties.get(
                'img_signature_certificate_uuid'
            )
            img_sig_key_type = image_meta.properties.get(
                'img_signature_key_type'
            )
            try:
                verifier = signature_utils.get_verifier(context,
                                                        img_sig_cert_uuid,
                                                        img_sig_hash_method,
                                                        img_signature,
                                                        img_sig_key_type)
            except exception.SignatureVerificationError:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Image signature verification failed '
                                  'for image: %s'), image_id)
        # If only a destination path was supplied, open (and later close)
        # the file ourselves.
        close_file = False
        if data is None and dst_path:
            data = open(dst_path, 'wb')
            close_file = True
        if data is None:
            # Perform image signature verification
            if verifier:
                try:
                    for chunk in image_chunks:
                        verifier.update(chunk)
                    verifier.verify()
                    LOG.info(_LI('Image signature verification succeeded '
                                 'for image: %s'), image_id)
                except cryptography.exceptions.InvalidSignature:
                    with excutils.save_and_reraise_exception():
                        LOG.error(_LE('Image signature verification failed '
                                      'for image: %s'), image_id)
            # NOTE(review): when verification ran, the chunk iterator has
            # already been consumed here -- presumably callers re-fetch or
            # this path expects a re-iterable; TODO confirm.
            return image_chunks
        else:
            try:
                for chunk in image_chunks:
                    if verifier:
                        verifier.update(chunk)
                    data.write(chunk)
                if verifier:
                    verifier.verify()
                    LOG.info(_LI('Image signature verification succeeded '
                                 'for image %s'), image_id)
            except cryptography.exceptions.InvalidSignature:
                # Empty the written file before re-raising so a bad image
                # is not left on disk.
                data.truncate(0)
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Image signature verification failed '
                                  'for image: %s'), image_id)
            except Exception as ex:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE("Error writing to %(path)s: %(exception)s"),
                              {'path': dst_path, 'exception': ex})
            finally:
                if close_file:
                    data.close()
def create(self, context, image_meta, data=None):
"""Store the image data and return the new image object."""
sent_service_image_meta = _translate_to_glance(image_meta)
if data:
sent_service_image_meta['data'] = data
try:
recv_service_image_meta = self._client.call(
context, 1, 'create', **sent_service_image_meta)
except glanceclient.exc.HTTPException:
_reraise_translated_exception()
return _translate_from_glance(recv_service_image_meta)
def update(self, context, image_id, image_meta, data=None,
purge_props=True):
"""Modify the given image with the new data."""
image_meta = _translate_to_glance(image_meta)
image_meta['purge_props'] = purge_props
# NOTE(bcwaldon): id is not an editable field, but it is likely to be
# passed in by calling code. Let's be nice and ignore it.
image_meta.pop('id', None)
if data:
image_meta['data'] = data
try:
image_meta = self._client.call(context, 1, 'update',
image_id, **image_meta)
except Exception:
_reraise_translated_image_exception(image_id)
else:
return _translate_from_glance(image_meta)
    def delete(self, context, image_id):
        """Delete the given image.

        :raises: ImageNotFound if the image does not exist.
        :raises: NotAuthorized if the user is not an owner.
        :raises: ImageNotAuthorized if the user is not authorized.
        :returns: True on success
        """
        try:
            self._client.call(context, 1, 'delete', image_id)
        except glanceclient.exc.NotFound:
            raise exception.ImageNotFound(image_id=image_id)
        except glanceclient.exc.HTTPForbidden:
            raise exception.ImageNotAuthorized(image_id=image_id)
        return True
def _extract_query_params(params):
_params = {}
accepted_params = ('filters', 'marker', 'limit',
'page_size', 'sort_key', 'sort_dir')
for param in accepted_params:
if params.get(param):
_params[param] = params.get(param)
# ensure filters is a dict
_params.setdefault('filters', {})
# NOTE(vish): don't filter out private images
_params['filters'].setdefault('is_public', 'none')
return _params
def _is_image_available(context, image):
"""Check image availability.
This check is needed in case Nova and Glance are deployed
without authentication turned on.
"""
# The presence of an auth token implies this is an authenticated
# request and we need not handle the noauth use-case.
if hasattr(context, 'auth_token') and context.auth_token:
return True
def _is_image_public(image):
# NOTE(jaypipes) V2 Glance API replaced the is_public attribute
# with a visibility attribute. We do this here to prevent the
# glanceclient for a V2 image model from throwing an
# exception from warlock when trying to access an is_public
# attribute.
if hasattr(image, 'visibility'):
return str(image.visibility).lower() == 'public'
else:
return image.is_public
if context.is_admin or _is_image_public(image):
return True
properties = image.properties
if context.project_id and ('owner_id' in properties):
return str(properties['owner_id']) == str(context.project_id)
if context.project_id and ('project_id' in properties):
return str(properties['project_id']) == str(context.project_id)
try:
user_id = properties['user_id']
except KeyError:
return False
return str(user_id) == str(context.user_id)
def _translate_to_glance(image_meta):
    """Convert internal image metadata into the form Glance expects."""
    return _remove_read_only(_convert_to_string(image_meta))
def _translate_from_glance(image, include_locations=False):
    """Convert a Glance image object into a plain metadata dict."""
    meta = _extract_attributes(image, include_locations=include_locations)
    return _convert_from_string(_convert_timestamps_to_datetimes(meta))
def _convert_timestamps_to_datetimes(image_meta):
"""Returns image with timestamp fields converted to datetime objects."""
for attr in ['created_at', 'updated_at', 'deleted_at']:
if image_meta.get(attr):
image_meta[attr] = timeutils.parse_isotime(image_meta[attr])
return image_meta
# NOTE(bcwaldon): used to store non-string data in glance metadata
def _json_loads(properties, attr):
    """Decode properties[attr] in place when it is a JSON string."""
    value = properties[attr]
    if isinstance(value, six.string_types):
        properties[attr] = jsonutils.loads(value)
def _json_dumps(properties, attr):
    """Encode properties[attr] in place unless it is already a string."""
    value = properties[attr]
    if not isinstance(value, six.string_types):
        properties[attr] = jsonutils.dumps(value)
_CONVERT_PROPS = ('block_device_mapping', 'mappings')
def _convert(method, metadata):
metadata = copy.deepcopy(metadata)
properties = metadata.get('properties')
if properties:
for attr in _CONVERT_PROPS:
if attr in properties:
method(properties, attr)
return metadata
def _convert_from_string(metadata):
    # Decode JSON-encoded complex properties (see _CONVERT_PROPS).
    return _convert(_json_loads, metadata)
def _convert_to_string(metadata):
    # JSON-encode complex properties (see _CONVERT_PROPS) for Glance.
    return _convert(_json_dumps, metadata)
def _extract_attributes(image, include_locations=False):
    """Flatten a Glance image object into a plain attribute dict.

    :param image: Glance image object (attribute access may trigger a
                  round trip to Glance for missing attributes)
    :param include_locations: whether to copy 'direct_url' and 'locations'
    :returns: dict of image attributes, plus a 'properties' dict
    """
    # NOTE(hdd): If a key is not found, base.Resource.__getattr__() may perform
    # a get(), resulting in a useless request back to glance. This list is
    # therefore sorted, with dependent attributes as the end
    # 'deleted_at' depends on 'deleted'
    # 'checksum' depends on 'status' == 'active'
    IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner',
                        'container_format', 'status', 'id',
                        'name', 'created_at', 'updated_at',
                        'deleted', 'deleted_at', 'checksum',
                        'min_disk', 'min_ram', 'is_public',
                        'direct_url', 'locations']

    queued = getattr(image, 'status') == 'queued'
    queued_exclude_attrs = ['disk_format', 'container_format']
    include_locations_attrs = ['direct_url', 'locations']
    output = {}

    for attr in IMAGE_ATTRIBUTES:
        if attr == 'deleted_at' and not output['deleted']:
            output[attr] = None
        elif attr == 'checksum' and output['status'] != 'active':
            output[attr] = None
        # image may not have 'name' attr
        elif attr == 'name':
            output[attr] = getattr(image, attr, None)
        # NOTE(liusheng): queued image may not have these attributes and 'name'
        elif queued and attr in queued_exclude_attrs:
            output[attr] = getattr(image, attr, None)
        # NOTE(mriedem): Only get location attrs if including locations.
        elif attr in include_locations_attrs:
            if include_locations:
                output[attr] = getattr(image, attr, None)
        # NOTE(mdorman): 'size' attribute must not be 'None', so use 0 instead
        elif attr == 'size':
            output[attr] = getattr(image, attr) or 0
        else:
            # NOTE(xarses): Anything that is caught with the default value
            # will result in an additional lookup to glance for said attr.
            # Notable attributes that could have this issue:
            # disk_format, container_format, name, deleted, checksum
            output[attr] = getattr(image, attr, None)

    output['properties'] = getattr(image, 'properties', {})

    return output
def _remove_read_only(image_meta):
IMAGE_ATTRIBUTES = ['status', 'updated_at', 'created_at', 'deleted_at']
output = copy.deepcopy(image_meta)
for attr in IMAGE_ATTRIBUTES:
if attr in output:
del output[attr]
return output
def _reraise_translated_image_exception(image_id):
    """Transform the exception for the image but keep its traceback intact."""
    # Must be called from inside an 'except' block.
    exc_type, exc_value, exc_trace = sys.exc_info()
    new_exc = _translate_image_exception(image_id, exc_value)
    # six.reraise accepts an exception instance as its first argument;
    # passing the original traceback keeps the root cause visible.
    six.reraise(new_exc, None, exc_trace)
def _reraise_translated_exception():
    """Transform the exception but keep its traceback intact."""
    # Must be called from inside an 'except' block.
    exc_type, exc_value, exc_trace = sys.exc_info()
    new_exc = _translate_plain_exception(exc_value)
    six.reraise(new_exc, None, exc_trace)
def _translate_image_exception(image_id, exc_value):
    """Map a glanceclient error onto the matching Nova image exception.

    Unrecognized exceptions are returned unchanged.
    """
    translations = (
        ((glanceclient.exc.Forbidden, glanceclient.exc.Unauthorized),
         lambda: exception.ImageNotAuthorized(image_id=image_id)),
        (glanceclient.exc.NotFound,
         lambda: exception.ImageNotFound(image_id=image_id)),
        (glanceclient.exc.BadRequest,
         lambda: exception.ImageBadRequest(
             image_id=image_id, response=six.text_type(exc_value))),
    )
    for exc_types, make_new in translations:
        if isinstance(exc_value, exc_types):
            return make_new()
    return exc_value
def _translate_plain_exception(exc_value):
    """Map a glanceclient error onto the matching generic Nova exception.

    Unrecognized exceptions are returned unchanged.
    """
    translations = (
        ((glanceclient.exc.Forbidden, glanceclient.exc.Unauthorized),
         exception.Forbidden),
        (glanceclient.exc.NotFound, exception.NotFound),
        (glanceclient.exc.BadRequest, exception.Invalid),
    )
    for exc_types, new_cls in translations:
        if isinstance(exc_value, exc_types):
            return new_cls(six.text_type(exc_value))
    return exc_value
def get_remote_image_service(context, image_href):
    """Create an image_service and parse the id from the given image_href.

    The image_href param can be an href of the form
    'http://example.com:9292/v1/images/b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3',
    or just an id such as 'b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3'. If the
    image_href is a standalone id, then the default image service is returned.

    :param image_href: href that describes the location of an image
    :returns: a tuple of the form (image_service, image_id)
    :raises: InvalidImageRef when the href cannot be parsed
    """
    # NOTE(bcwaldon): If image_href doesn't look like a URI, assume its a
    # standalone image ID
    if '/' not in str(image_href):
        image_service = get_default_image_service()
        return image_service, image_href

    try:
        (image_id, endpoint) = _endpoint_from_image_ref(image_href)
        glance_client = GlanceClientWrapper(context=context,
                                            endpoint=endpoint)
    except ValueError:
        raise exception.InvalidImageRef(image_href=image_href)

    image_service = GlanceImageService(client=glance_client)
    return image_service, image_id
def get_default_image_service():
    """Return an image service backed by the default Glance client."""
    return GlanceImageService()
class UpdateGlanceImage(object):
    """Deferred job that pushes new metadata and data to a Glance image."""

    def __init__(self, context, image_id, metadata, stream):
        # Everything is stored verbatim; no work happens until start().
        self.context = context
        self.image_id = image_id
        self.metadata = metadata
        self.image_stream = stream

    def start(self):
        """Resolve the image service and perform the update."""
        service, img_id = get_remote_image_service(self.context,
                                                   self.image_id)
        service.update(self.context, img_id, self.metadata,
                       self.image_stream, purge_props=False)
| |
import sys
import nose
from nose.tools import assert_raises
from mpl_toolkits.mplot3d import Axes3D, axes3d
from matplotlib import cm
from matplotlib.testing.decorators import image_comparison, cleanup
import matplotlib.pyplot as plt
import numpy as np
@image_comparison(baseline_images=['bar3d'], remove_text=True)
def test_bar3d():
    """2D bar charts stacked along the y axis of a 3D plot (baseline image)."""
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    for c, z in zip(['r', 'g', 'b', 'y'], [30, 20, 10, 0]):
        xs = np.arange(20)
        ys = np.arange(20)
        cs = [c] * len(xs)
        # First bar of each row rendered in cyan.
        cs[0] = 'c'
        ax.bar(xs, ys, zs=z, zdir='y', color=cs, alpha=0.8)
@image_comparison(baseline_images=['contour3d'], remove_text=True)
def test_contour3d():
    """Contour lines projected onto all three coordinate planes."""
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    X, Y, Z = axes3d.get_test_data(0.05)
    cset = ax.contour(X, Y, Z, zdir='z', offset=-100, cmap=cm.coolwarm)
    cset = ax.contour(X, Y, Z, zdir='x', offset=-40, cmap=cm.coolwarm)
    cset = ax.contour(X, Y, Z, zdir='y', offset=40, cmap=cm.coolwarm)
    ax.set_xlim(-40, 40)
    ax.set_ylim(-40, 40)
    ax.set_zlim(-100, 100)
@image_comparison(baseline_images=['contourf3d'], remove_text=True)
def test_contourf3d():
    """Filled contours projected onto all three coordinate planes."""
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    X, Y, Z = axes3d.get_test_data(0.05)
    cset = ax.contourf(X, Y, Z, zdir='z', offset=-100, cmap=cm.coolwarm)
    cset = ax.contourf(X, Y, Z, zdir='x', offset=-40, cmap=cm.coolwarm)
    cset = ax.contourf(X, Y, Z, zdir='y', offset=40, cmap=cm.coolwarm)
    ax.set_xlim(-40, 40)
    ax.set_ylim(-40, 40)
    ax.set_zlim(-100, 100)
@image_comparison(baseline_images=['contourf3d_fill'], remove_text=True)
def test_contourf3d_fill():
    """Filled contours of a surface containing holes (regression image)."""
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    X, Y = np.meshgrid(np.arange(-2, 2, 0.25), np.arange(-2, 2, 0.25))
    Z = X.clip(0, 0)
    # This produces holes in the z=0 surface that causes rendering errors if
    # the Poly3DCollection is not aware of path code information (issue #4784)
    Z[::5, ::5] = 0.1
    cset = ax.contourf(X, Y, Z, offset=0, levels=[-0.1, 0], cmap=cm.coolwarm)
    ax.set_xlim(-2, 2)
    ax.set_ylim(-2, 2)
    ax.set_zlim(-1, 1)
@image_comparison(baseline_images=['lines3d'], remove_text=True)
def test_lines3d():
    """A parametric 3D curve (spiral of varying radius)."""
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    theta = np.linspace(-4 * np.pi, 4 * np.pi, 100)
    z = np.linspace(-2, 2, 100)
    r = z ** 2 + 1
    x = r * np.sin(theta)
    y = r * np.cos(theta)
    ax.plot(x, y, z)
@image_comparison(baseline_images=['mixedsubplot'], remove_text=True)
def test_mixedsubplots():
    """A 2D axes and a 3D axes sharing one figure (baseline image)."""
    def f(t):
        # Damped cosine for the 2D panel.
        s1 = np.cos(2*np.pi*t)
        e1 = np.exp(-t)
        return np.multiply(s1, e1)

    t1 = np.arange(0.0, 5.0, 0.1)
    t2 = np.arange(0.0, 5.0, 0.02)

    fig = plt.figure(figsize=plt.figaspect(2.))
    ax = fig.add_subplot(2, 1, 1)
    # The plot/plot_surface return values were previously bound to unused
    # locals ('l' and 'surf'); they are discarded here.
    ax.plot(t1, f(t1), 'bo',
            t2, f(t2), 'k--', markerfacecolor='green')
    ax.grid(True)

    ax = fig.add_subplot(2, 1, 2, projection='3d')
    X, Y = np.meshgrid(np.arange(-5, 5, 0.25), np.arange(-5, 5, 0.25))
    R = np.sqrt(X ** 2 + Y ** 2)
    Z = np.sin(R)
    ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
                    linewidth=0, antialiased=False)
    ax.set_zlim3d(-1, 1)
@image_comparison(baseline_images=['scatter3d'], remove_text=True)
def test_scatter3d():
    """Two 3D point clouds distinguished via the c= color argument."""
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(np.arange(10), np.arange(10), np.arange(10),
               c='r', marker='o')
    ax.scatter(np.arange(10, 20), np.arange(10, 20), np.arange(10, 20),
               c='b', marker='^')
@image_comparison(baseline_images=['scatter3d_color'], remove_text=True,
                  extensions=['png'])
def test_scatter3d_color():
    """Same as test_scatter3d but using the color= keyword."""
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(np.arange(10), np.arange(10), np.arange(10),
               color='r', marker='o')
    ax.scatter(np.arange(10, 20), np.arange(10, 20), np.arange(10, 20),
               color='b', marker='s')
@image_comparison(baseline_images=['surface3d'], remove_text=True)
def test_surface3d():
    """Radially symmetric sin(r) surface with a colorbar."""
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    X = np.arange(-5, 5, 0.25)
    Y = np.arange(-5, 5, 0.25)
    X, Y = np.meshgrid(X, Y)
    R = np.sqrt(X ** 2 + Y ** 2)
    Z = np.sin(R)
    surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm,
                           lw=0, antialiased=False)
    ax.set_zlim(-1.01, 1.01)
    fig.colorbar(surf, shrink=0.5, aspect=5)
@image_comparison(baseline_images=['text3d'])
def test_text3d():
    """Text placed with assorted zdir orientations plus a 2D overlay."""
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    zdirs = (None, 'x', 'y', 'z', (1, 1, 0), (1, 1, 1))
    xs = (2, 6, 4, 9, 7, 2)
    ys = (6, 4, 8, 7, 2, 2)
    zs = (4, 2, 5, 6, 1, 7)
    for zdir, x, y, z in zip(zdirs, xs, ys, zs):
        label = '(%d, %d, %d), dir=%s' % (x, y, z, zdir)
        ax.text(x, y, z, label, zdir)
    ax.text(1, 1, 1, "red", color='red')
    ax.text2D(0.05, 0.95, "2D Text", transform=ax.transAxes)
    ax.set_xlim3d(0, 10)
    ax.set_ylim3d(0, 10)
    ax.set_zlim3d(0, 10)
    ax.set_xlabel('X axis')
    ax.set_ylabel('Y axis')
    ax.set_zlabel('Z axis')
@image_comparison(baseline_images=['trisurf3d'], remove_text=True)
def test_trisurf3d():
    """Triangulated surface over points on concentric rings."""
    n_angles = 36
    n_radii = 8
    radii = np.linspace(0.125, 1.0, n_radii)
    angles = np.linspace(0, 2*np.pi, n_angles, endpoint=False)
    angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
    # Stagger every other column of angles.
    angles[:, 1::2] += np.pi/n_angles
    x = np.append(0, (radii*np.cos(angles)).flatten())
    y = np.append(0, (radii*np.sin(angles)).flatten())
    z = np.sin(-x*y)
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    ax.plot_trisurf(x, y, z, cmap=cm.jet, linewidth=0.2)
@image_comparison(baseline_images=['wireframe3d'], remove_text=True)
def test_wireframe3d():
    """Standard wireframe with strides in both directions."""
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    X, Y, Z = axes3d.get_test_data(0.05)
    ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10)
@image_comparison(baseline_images=['wireframe3dzerocstride'], remove_text=True,
                  extensions=['png'])
def test_wireframe3dzerocstride():
    """cstride=0 draws row lines only."""
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    X, Y, Z = axes3d.get_test_data(0.05)
    ax.plot_wireframe(X, Y, Z, rstride=10, cstride=0)
@image_comparison(baseline_images=['wireframe3dzerorstride'], remove_text=True,
                  extensions=['png'])
def test_wireframe3dzerorstride():
    """rstride=0 draws column lines only."""
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    X, Y, Z = axes3d.get_test_data(0.05)
    ax.plot_wireframe(X, Y, Z, rstride=0, cstride=10)
@cleanup
def test_wireframe3dzerostrideraises():
    """plot_wireframe must reject rstride=0 combined with cstride=0."""
    if sys.version_info[:2] < (2, 7):
        raise nose.SkipTest("assert_raises as context manager "
                            "not supported with Python < 2.7")
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    X, Y, Z = axes3d.get_test_data(0.05)
    with assert_raises(ValueError):
        ax.plot_wireframe(X, Y, Z, rstride=0, cstride=0)
@image_comparison(baseline_images=['quiver3d'], remove_text=True)
def test_quiver3d():
    """3D quiver over an ogrid of trigonometric vector components."""
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    x, y, z = np.ogrid[-1:0.8:10j, -1:0.8:10j, -1:0.6:3j]
    u = np.sin(np.pi * x) * np.cos(np.pi * y) * np.cos(np.pi * z)
    v = -np.cos(np.pi * x) * np.sin(np.pi * y) * np.cos(np.pi * z)
    w = (np.sqrt(2.0 / 3.0) * np.cos(np.pi * x) * np.cos(np.pi * y) *
         np.sin(np.pi * z))
    ax.quiver(x, y, z, u, v, w, length=0.1)
@image_comparison(baseline_images=['quiver3d_empty'], remove_text=True)
def test_quiver3d_empty():
    """quiver with zero-length grids (0j steps) must not error."""
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    x, y, z = np.ogrid[-1:0.8:0j, -1:0.8:0j, -1:0.6:0j]
    u = np.sin(np.pi * x) * np.cos(np.pi * y) * np.cos(np.pi * z)
    v = -np.cos(np.pi * x) * np.sin(np.pi * y) * np.cos(np.pi * z)
    w = (np.sqrt(2.0 / 3.0) * np.cos(np.pi * x) * np.cos(np.pi * y) *
         np.sin(np.pi * z))
    ax.quiver(x, y, z, u, v, w, length=0.1)
@image_comparison(baseline_images=['quiver3d_masked'], remove_text=True)
def test_quiver3d_masked():
    """quiver with masked u/v components skips the masked arrows."""
    fig = plt.figure()
    ax = fig.gca(projection='3d')

    # Using mgrid here instead of ogrid because masked_where doesn't
    # seem to like broadcasting very much...
    x, y, z = np.mgrid[-1:0.8:10j, -1:0.8:10j, -1:0.6:3j]

    u = np.sin(np.pi * x) * np.cos(np.pi * y) * np.cos(np.pi * z)
    v = -np.cos(np.pi * x) * np.sin(np.pi * y) * np.cos(np.pi * z)
    w = (np.sqrt(2.0 / 3.0) * np.cos(np.pi * x) * np.cos(np.pi * y) *
         np.sin(np.pi * z))
    u = np.ma.masked_where((-0.4 < x) & (x < 0.1), u, copy=False)
    v = np.ma.masked_where((0.1 < y) & (y < 0.7), v, copy=False)

    ax.quiver(x, y, z, u, v, w, length=0.1)
@image_comparison(baseline_images=['quiver3d_pivot_middle'], remove_text=True,
                  extensions=['png'])
def test_quiver3d_pivot_middle():
    """quiver arrows anchored at their midpoints (pivot='middle')."""
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    x, y, z = np.ogrid[-1:0.8:10j, -1:0.8:10j, -1:0.6:3j]
    u = np.sin(np.pi * x) * np.cos(np.pi * y) * np.cos(np.pi * z)
    v = -np.cos(np.pi * x) * np.sin(np.pi * y) * np.cos(np.pi * z)
    w = (np.sqrt(2.0 / 3.0) * np.cos(np.pi * x) * np.cos(np.pi * y) *
         np.sin(np.pi * z))
    ax.quiver(x, y, z, u, v, w, length=0.1, pivot='middle')
@image_comparison(baseline_images=['quiver3d_pivot_tail'], remove_text=True,
                  extensions=['png'])
def test_quiver3d_pivot_tail():
    """quiver arrows anchored at their tails (pivot='tail')."""
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    x, y, z = np.ogrid[-1:0.8:10j, -1:0.8:10j, -1:0.6:3j]
    u = np.sin(np.pi * x) * np.cos(np.pi * y) * np.cos(np.pi * z)
    v = -np.cos(np.pi * x) * np.sin(np.pi * y) * np.cos(np.pi * z)
    w = (np.sqrt(2.0 / 3.0) * np.cos(np.pi * x) * np.cos(np.pi * y) *
         np.sin(np.pi * z))
    ax.quiver(x, y, z, u, v, w, length=0.1, pivot='tail')
@image_comparison(baseline_images=['axes3d_labelpad'], extensions=['png'])
def test_axes3d_labelpad():
    """labelpad can come from rcParams, set_label, or direct assignment."""
    from nose.tools import assert_equal
    from matplotlib import rcParams

    fig = plt.figure()
    ax = Axes3D(fig)
    # labelpad respects rcParams
    assert_equal(ax.xaxis.labelpad, rcParams['axes.labelpad'])
    # labelpad can be set in set_label
    ax.set_xlabel('X LABEL', labelpad=10)
    assert_equal(ax.xaxis.labelpad, 10)
    ax.set_ylabel('Y LABEL')
    ax.set_zlabel('Z LABEL')
    # or manually
    ax.yaxis.labelpad = 20
    ax.zaxis.labelpad = -40

    # Tick labels also respect tick.pad (also from rcParams)
    for i, tick in enumerate(ax.yaxis.get_major_ticks()):
        tick.set_pad(tick.get_pad() - i * 5)
@image_comparison(baseline_images=['axes3d_cla'], extensions=['png'])
def test_axes3d_cla():
    """cla() must leave the axes 3D rather than resetting it to 2D."""
    # fixed in pull request 4553
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1, projection='3d')
    ax.set_axis_off()
    ax.cla()  # make sure the axis displayed is 3D (not 2D)
if __name__ == '__main__':
    # Allow running this test module directly under nose.
    import nose
    nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| |
#!/usr/bin/python
#
# Average bandwidth monitoring script. Run periodically via NM db.sync to
# enforce a soft limit on daily bandwidth usage for each slice. If a
# slice is found to have transmitted 80% of its daily byte limit usage,
# its instantaneous rate will be capped at the bytes remaning in the limit
# over the time remaining in the recording period.
#
# Two separate limits are enforced, one for destinations exempt from
# the node bandwidth cap (i.e. Internet2), and the other for all other destinations.
#
# Mark Huang <mlhuang@cs.princeton.edu>
# Andy Bavier <acb@cs.princeton.edu>
# Faiyaz Ahmed <faiyaza@cs.princeton.edu>
# Copyright (C) 2004-2008 The Trustees of Princeton University
#
import os
import sys
import time
import pickle
import socket
import copy
import threading
import logger
import tools
import bwlimit
import database
from config import Config
# Plugin run priority -- presumably consumed by the node manager
# scheduler; TODO confirm.
priority = 20

# Defaults
# Set DEBUG to True if you don't want to send emails
DEBUG = False
# Set ENABLE to False to setup buckets, but not limit.
ENABLE = True

# Persisted monitor state (pickled dict of Slice objects) between runs.
DB_FILE = "/var/lib/nodemanager/bwmon.pickle"

# Constants
seconds_per_day = 24 * 60 * 60
bits_per_byte = 8

dev_default = tools.get_default_if()
# Burst to line rate (or node cap). Set by NM. in KBit/s
default_MaxRate = int(bwlimit.get_bwcap(dev_default) / 1000)
default_Maxi2Rate = int(bwlimit.bwmax / 1000)
# 5.4 Gbyte per day. 5.4 * 1024 k * 1024M * 1024G
# 5.4 Gbyte per day max allowed transfered per recording period
# 5.4 Gbytes per day is aprox 512k/s for 24hrs (approx because original math was wrong
# but its better to keep a higher byte total and keep people happy than correct
# the problem and piss people off.
# default_MaxKByte = 5662310
# -- 6/1/09
# llp wants to double these, so we use the following
# 1mbit * 24hrs * 60mins * 60secs = bits/day
# 1000000 * 24 * 60 * 60 / (1024 * 8)
default_MaxKByte = 10546875
# 16.4 Gbyte per day max allowed transfered per recording period to I2
# default_Maxi2KByte = 17196646
# -- 6/1/09
# 3Mb/s for 24hrs a day (30.17 gigs)
default_Maxi2KByte = 31640625
# Default share quanta
default_Share = 1

# Average over 1 day
period = 1 * seconds_per_day

# Message template sent to slice owners when a cap kicks in; filled in
# by Slice.notify() via %-formatting.
template = \
"""
The slice %(slice)s has transmitted more than %(bytes)s from
%(hostname)s to %(class)s destinations
since %(since)s.
Its maximum %(class)s burst rate will be capped at %(new_maxrate)s/s
until %(until)s.
Please reduce the average %(class)s transmission rate
of the slice to %(limit)s per %(period)s.
""".lstrip()

# One-line audit trailer appended to every notification.
footer = \
"""
%(date)s %(hostname)s bwcap %(slice)s
""".lstrip()
def format_bytes(bytes, si = True):
    """
    Formats bytes into a human-readable string.

    :param bytes: byte count to format
    :param si: when True use SI units (1 kB = 1000 B); when False use
               binary units (1 KiB = 1024 B)
    """
    if si:
        kilo = 1000.
    else:
        # Officially, a kibibyte
        kilo = 1024.

    # Fix: compare against the same base we divide by.  The original code
    # hard-coded SI thresholds (1000000, 1000) even when si=False, so e.g.
    # 1023 bytes in binary mode printed as "1.0 KB".
    if bytes >= (kilo * kilo * kilo):
        return "%.1f GB" % (bytes / (kilo * kilo * kilo))
    elif bytes >= (kilo * kilo):
        return "%.1f MB" % (bytes / (kilo * kilo))
    elif bytes >= kilo:
        return "%.1f KB" % (bytes / kilo)
    else:
        return "%.0f bytes" % bytes
def format_period(seconds):
    """
    Formats a period in seconds into a string
    """
    day = 24 * 60 * 60
    hour = 60 * 60
    if seconds == day:
        return "day"
    if seconds == hour:
        return "hour"
    if seconds > day:
        return "%.1f days" % (seconds / 24. / 60. / 60.)
    if seconds > hour:
        return "%.1f hours" % (seconds / 60. / 60.)
    if seconds > 60:
        return "%.1f minutes" % (seconds / 60.)
    return "%.0f seconds" % seconds
def slicemail(slice, subject, body):
    '''
    Front end to sendmail. Sends email to slice alias with given subject and body.

    The support/MOM list is always a recipient; for non-root slices the
    slice alias (PLC_MAIL_SLICE_ADDRESS with SLICE substituted) is added.
    '''
    config = Config()

    # Pipe the message straight into sendmail; "-N never" suppresses
    # delivery status notifications.
    sendmail = os.popen("/usr/sbin/sendmail -N never -t -f%s" % config.PLC_MAIL_SUPPORT_ADDRESS, "w")

    # Parsed from MyPLC config
    to = [config.PLC_MAIL_MOM_LIST_ADDRESS]

    if slice is not None and slice != "root":
        to.append(config.PLC_MAIL_SLICE_ADDRESS.replace("SLICE", slice))

    header = {'from': "%s Support <%s>" % (config.PLC_NAME,
                                           config.PLC_MAIL_SUPPORT_ADDRESS),
              'to': ", ".join(to),
              'version': sys.version.split(" ")[0],
              'subject': subject}

    # Write headers
    sendmail.write(
"""
Content-type: text/plain
From: %(from)s
Reply-To: %(from)s
To: %(to)s
X-Mailer: Python/%(version)s
Subject: %(subject)s
""".lstrip() % header)

    # Write body
    sendmail.write(body)
    # Done
    sendmail.close()
class Slice:
    """
    Stores the last recorded bandwidth parameters of a slice.

    xid - slice context/VServer ID
    name - slice name
    time - beginning of recording period in UNIX seconds
    bytes - low bandwidth bytes transmitted at the beginning of the recording period
    i2bytes - high bandwidth bytes transmitted at the beginning of the recording period (for I2 -F)
    MaxKByte - total volume of data allowed
    ThreshKbyte - After thresh, cap node to (maxkbyte - bytes)/(time left in period)
    Maxi2KByte - same as MaxKByte, but for i2
    Threshi2KByte - same as ThreshKByte, but for i2
    MaxRate - max_rate slice attribute.
    Maxi2Rate - max_exempt_rate slice attribute.
    Share - Used by Sirius to loan min rates
    Sharei2 - Used by Sirius to loan min rates for i2
    emailed - did slice recv email during this recording period
    capped - was the slice capped during this recording period
    """
    def __init__(self, xid, name, rspec):
        self.xid = xid
        self.name = name
        # Start of the current recording period (UNIX seconds); stays 0
        # until reset() is first called.
        self.time = 0
        # Byte counters captured at the start of the recording period.
        self.bytes = 0
        self.i2bytes = 0
        # Begin with site-wide defaults; updateSliceTags() below applies
        # any per-slice overrides carried in the rspec.
        self.MaxRate = default_MaxRate
        self.MinRate = bwlimit.bwmin / 1000
        self.Maxi2Rate = default_Maxi2Rate
        self.Mini2Rate = bwlimit.bwmin / 1000
        self.MaxKByte = default_MaxKByte
        self.ThreshKByte = int(.8 * self.MaxKByte)
        self.Maxi2KByte = default_Maxi2KByte
        self.Threshi2KByte = int(.8 * self.Maxi2KByte)
        self.Share = default_Share
        self.Sharei2 = default_Share
        self.emailed = False
        self.capped = False

        self.updateSliceTags(rspec)
        # Install the initial traffic-control limits for this slice.
        bwlimit.set(xid = self.xid, dev = dev_default,
                minrate = self.MinRate * 1000,
                maxrate = self.MaxRate * 1000,
                maxexemptrate = self.Maxi2Rate * 1000,
                minexemptrate = self.Mini2Rate * 1000,
                share = self.Share)
    def __repr__(self):
        # Slices are identified by name in logs and pickled state.
        return self.name
def updateSliceTags(self, rspec):
'''
Use respects from GetSlivers to PLC to populate slice object. Also
do some sanity checking.
'''
# Sanity check plus policy decision for MinRate:
# Minrate cant be greater than 25% of MaxRate or NodeCap.
MinRate = int(rspec.get("net_min_rate", bwlimit.bwmin / 1000))
if MinRate > int(.25 * default_MaxRate):
MinRate = int(.25 * default_MaxRate)
if MinRate != self.MinRate:
self.MinRate = MinRate
logger.log("bwmon: Updating %s: Min Rate = %s" %(self.name, self.MinRate))
MaxRate = int(rspec.get('net_max_rate', default_MaxRate))
if MaxRate != self.MaxRate:
self.MaxRate = MaxRate
logger.log("bwmon: Updating %s: Max Rate = %s" %(self.name, self.MaxRate))
Mini2Rate = int(rspec.get('net_i2_min_rate', bwlimit.bwmin / 1000))
if Mini2Rate != self.Mini2Rate:
self.Mini2Rate = Mini2Rate
logger.log("bwmon: Updating %s: Min i2 Rate = %s" %(self.name, self.Mini2Rate))
Maxi2Rate = int(rspec.get('net_i2_max_rate', default_Maxi2Rate))
if Maxi2Rate != self.Maxi2Rate:
self.Maxi2Rate = Maxi2Rate
logger.log("bwmon: Updating %s: Max i2 Rate = %s" %(self.name, self.Maxi2Rate))
MaxKByte = int(rspec.get('net_max_kbyte', default_MaxKByte))
if MaxKByte != self.MaxKByte:
self.MaxKByte = MaxKByte
logger.log("bwmon: Updating %s: Max KByte lim = %s" %(self.name, self.MaxKByte))
Maxi2KByte = int(rspec.get('net_i2_max_kbyte', default_Maxi2KByte))
if Maxi2KByte != self.Maxi2KByte:
self.Maxi2KByte = Maxi2KByte
logger.log("bwmon: Updating %s: Max i2 KByte = %s" %(self.name, self.Maxi2KByte))
ThreshKByte = int(rspec.get('net_thresh_kbyte', (MaxKByte * .8)))
if ThreshKByte != self.ThreshKByte:
self.ThreshKByte = ThreshKByte
logger.log("bwmon: Updating %s: Thresh KByte = %s" %(self.name, self.ThreshKByte))
Threshi2KByte = int(rspec.get('net_i2_thresh_kbyte', (Maxi2KByte * .8)))
if Threshi2KByte != self.Threshi2KByte:
self.Threshi2KByte = Threshi2KByte
logger.log("bwmon: Updating %s: i2 Thresh KByte = %s" %(self.name, self.Threshi2KByte))
Share = int(rspec.get('net_share', default_Share))
if Share != self.Share:
self.Share = Share
logger.log("bwmon: Updating %s: Net Share = %s" %(self.name, self.Share))
Sharei2 = int(rspec.get('net_i2_share', default_Share))
if Sharei2 != self.Sharei2:
self.Sharei2 = Sharei2
logger.log("bwmon: Updating %s: Net i2 Share = %s" %(self.name, self.i2Share))
def reset(self, runningrates, rspec):
"""
Begin a new recording period. Remove caps by restoring limits
to their default values.
"""
# Cache share for later comparison
self.Share = runningrates.get('share', 1)
# Query Node Manager for max rate overrides
self.updateSliceTags(rspec)
# Reset baseline time
self.time = time.time()
# Reset baseline byte coutns
self.bytes = runningrates.get('usedbytes', 0)
self.i2bytes = runningrates.get('usedi2bytes', 0)
# Reset email
self.emailed = False
# Reset flag
self.capped = False
# Reset rates.
maxrate = self.MaxRate * 1000
minrate = self.MinRate * 1000
maxi2rate = self.Maxi2Rate * 1000
mini2rate = self.Mini2Rate * 1000
if (maxrate != runningrates.get('maxrate', 0)) or \
(minrate != runningrates.get('maxrate', 0)) or \
(maxi2rate != runningrates.get('maxexemptrate', 0)) or \
(mini2rate != runningrates.get('minexemptrate', 0)) or \
(self.Share != runningrates.get('share', 0)):
logger.log("bwmon: %s reset to %s/%s" % \
(self.name,
bwlimit.format_tc_rate(maxrate),
bwlimit.format_tc_rate(maxi2rate)))
bwlimit.set(xid = self.xid, dev = dev_default,
minrate = self.MinRate * 1000,
maxrate = self.MaxRate * 1000,
maxexemptrate = self.Maxi2Rate * 1000,
minexemptrate = self.Mini2Rate * 1000,
share = self.Share)
    def notify(self, new_maxrate, new_maxexemptrate, usedbytes, usedi2bytes):
        """
        Notify the slice it's being capped.

        new_maxrate / new_maxexemptrate -- newly applied caps in bit/s
        usedbytes / usedi2bytes -- current byte counters from tc

        At most one mail is sent per recording period (self.emailed latch);
        with DEBUG set the mail is only logged, never sent.
        """
        # Prepare message parameters from the template
        message = ""
        params = {'slice': self.name, 'hostname': socket.gethostname(),
                  'since': time.asctime(time.gmtime(self.time)) + " GMT",
                  'until': time.asctime(time.gmtime(self.time + period)) + " GMT",
                  'date': time.asctime(time.gmtime()) + " GMT",
                  'period': format_period(period)}
        # A rate differing from the configured maximum means a cap was applied.
        if new_maxrate != (self.MaxRate * 1000):
            # Format template parameters for low bandwidth message
            params['class'] = "low bandwidth"
            params['bytes'] = format_bytes(usedbytes - self.bytes)
            params['limit'] = format_bytes(self.MaxKByte * 1024)
            params['new_maxrate'] = bwlimit.format_tc_rate(new_maxrate)
            # Cap low bandwidth burst rate
            message += template % params
            logger.log("bwmon: ** %(slice)s %(class)s capped at %(new_maxrate)s/s " % params)
        if new_maxexemptrate != (self.Maxi2Rate * 1000):
            # Format template parameters for high bandwidth message
            params['class'] = "high bandwidth"
            params['bytes'] = format_bytes(usedi2bytes - self.i2bytes)
            params['limit'] = format_bytes(self.Maxi2KByte * 1024)
            params['new_maxrate'] = bwlimit.format_tc_rate(new_maxexemptrate)
            message += template % params
            logger.log("bwmon: ** %(slice)s %(class)s capped at %(new_maxrate)s/s " % params)
        # Notify slice
        if self.emailed == False:
            subject = "pl_mom capped bandwidth of slice %(slice)s on %(hostname)s" % params
            if DEBUG:
                logger.log("bwmon: "+ subject)
                logger.log("bwmon: "+ message + (footer % params))
            else:
                # Latch first so repeated caps in the same period don't spam.
                self.emailed = True
                logger.log("bwmon: Emailing %s" % self.name)
                slicemail(self.name, subject, message + (footer % params))
    def update(self, runningrates, rspec):
        """
        Update byte counts and check if byte thresholds have been
        exceeded. If exceeded, cap to remaining bytes in limit over remaining time in period.
        Recalculate every time module runs.

        runningrates -- tc state for this slice (maxrate, minrate,
                        maxexemptrate, minexemptrate, usedbytes,
                        usedi2bytes, share)
        rspec -- slice attribute dict used to refresh per-slice overrides
        """
        # cache share for later comparison
        # NOTE(review): this overwrites the tc-reported share with self.Share,
        # so the share comparison further down can never differ -- confirm intent.
        runningrates['share'] = self.Share
        # Query Node Manager for max rate overrides
        self.updateSliceTags(rspec)
        usedbytes = runningrates['usedbytes']
        usedi2bytes = runningrates['usedi2bytes']
        # Check limits.
        if usedbytes >= (self.bytes + (self.ThreshKByte * 1024)):
            # NOTE(review): `sum` is never used afterwards and shadows the builtin.
            sum = self.bytes + (self.ThreshKByte * 1024)
            maxbyte = self.MaxKByte * 1024
            bytesused = usedbytes - self.bytes
            timeused = int(time.time() - self.time)
            # Calcuate new rate. in bit/s
            new_maxrate = int(((maxbyte - bytesused) * 8)/(period - timeused))
            # Never go under MinRate
            if new_maxrate < (self.MinRate * 1000):
                new_maxrate = self.MinRate * 1000
            # State information. I'm capped.
            self.capped += True
        else:
            # Sanity Check
            new_maxrate = self.MaxRate * 1000
            self.capped += False
        if usedi2bytes >= (self.i2bytes + (self.Threshi2KByte * 1024)):
            maxi2byte = self.Maxi2KByte * 1024
            i2bytesused = usedi2bytes - self.i2bytes
            timeused = int(time.time() - self.time)
            # Calcuate New Rate.
            new_maxi2rate = int(((maxi2byte - i2bytesused) * 8)/(period - timeused))
            # Never go under MinRate
            if new_maxi2rate < (self.Mini2Rate * 1000):
                new_maxi2rate = self.Mini2Rate * 1000
            # State information. I'm capped.
            self.capped += True
        else:
            # Sanity
            new_maxi2rate = self.Maxi2Rate * 1000
            self.capped += False
        # Check running values against newly calculated values so as not to run tc
        # unnecessarily
        if (runningrates['maxrate'] != new_maxrate) or \
           (runningrates['minrate'] != self.MinRate * 1000) or \
           (runningrates['maxexemptrate'] != new_maxi2rate) or \
           (runningrates['minexemptrate'] != self.Mini2Rate * 1000) or \
           (runningrates['share'] != self.Share):
            # Apply parameters
            bwlimit.set(xid = self.xid, dev = dev_default,
                minrate = self.MinRate * 1000,
                maxrate = new_maxrate,
                minexemptrate = self.Mini2Rate * 1000,
                maxexemptrate = new_maxi2rate,
                share = self.Share)
        # Notify slice
        # NOTE(review): if BOTH thresholds tripped, self.capped is 2 (bool
        # arithmetic above) and "== True" is False, so no notification is
        # sent in that case -- verify this is intended.
        if self.capped == True:
            self.notify(new_maxrate, new_maxi2rate, usedbytes, usedi2bytes)
def gethtbs(root_xid, default_xid):
    """
    Return dict {xid: {*rates}} of running htbs as reported by tc that have names.
    Turn off HTBs without names.
    """
    htbs = {}
    for (xid, share,
         minrate, maxrate,
         minexemptrate, maxexemptrate,
         usedbytes, usedi2bytes) in bwlimit.get(dev = dev_default):
        name = bwlimit.get_slice(xid)
        # A nameless class that isn't root/default is an orphan left behind
        # by a vanished slice: shut it down.
        if (name is None) and xid != root_xid and xid != default_xid:
            # Orphaned (not associated with a slice) class
            name = "%d?" % xid
            logger.log("bwmon: Found orphaned HTB %s. Removing." %name)
            bwlimit.off(xid, dev = dev_default)
        htbs[xid] = {
            'share': share,
            'minrate': minrate,
            'maxrate': maxrate,
            'maxexemptrate': maxexemptrate,
            'minexemptrate': minexemptrate,
            'usedbytes': usedbytes,
            'name': name,
            'usedi2bytes': usedi2bytes,
        }
    return htbs
def sync(nmdbcopy):
    """
    Syncs tc, db, and bwmon.pickle.
    Then, starts new slices, kills old ones, and updates byte accounts for each running slice.
    Sends emails and caps those that went over their limit.

    nmdbcopy -- deep copy of the Node Manager database: {slice_name: data};
                each entry is expected to carry a computed '_rspec' dict.
    """
    # Defaults
    global DB_FILE, \
        period, \
        default_MaxRate, \
        default_Maxi2Rate, \
        default_MaxKByte,\
        default_Maxi2KByte,\
        default_Share, \
        dev_default
    # All slices
    names = []
    # In case the limits have changed.
    default_MaxRate = int(bwlimit.get_bwcap(dev_default) / 1000)
    default_Maxi2Rate = int(bwlimit.bwmax / 1000)
    # Incase default isn't set yet.
    if default_MaxRate == -1:
        default_MaxRate = 1000000
    # Load persisted accounting state; any failure (missing file, stale
    # version) restarts accounting from scratch.
    try:
        f = open(DB_FILE, "r+")
        logger.verbose("bwmon: Loading %s" % DB_FILE)
        (version, slices, deaddb) = pickle.load(f)
        f.close()
        # Check version of data file
        if version != "$Id$":
            logger.log("bwmon: Not using old version '%s' data file %s" % (version, DB_FILE))
            raise Exception
    except Exception:
        version = "$Id$"
        slices = {}
        deaddb = {}
    # Get/set special slice IDs
    root_xid = bwlimit.get_xid("root")
    default_xid = bwlimit.get_xid("default")
    # Since root is required for sanity, its not in the API/plc database, so pass {}
    # to use defaults.
    if root_xid not in slices.keys():
        slices[root_xid] = Slice(root_xid, "root", {})
        slices[root_xid].reset({}, {})
    # Used by bwlimit. pass {} since there is no rspec (like above).
    if default_xid not in slices.keys():
        slices[default_xid] = Slice(default_xid, "default", {})
        slices[default_xid].reset({}, {})
    live = {}
    # Get running slivers that should be on this node (from plc). {xid: name}
    # db keys on name, bwmon keys on xid. db doesnt have xid either.
    for plcSliver in nmdbcopy.keys():
        live[bwlimit.get_xid(plcSliver)] = nmdbcopy[plcSliver]
    logger.verbose("bwmon: Found %s instantiated slices" % live.keys().__len__())
    logger.verbose("bwmon: Found %s slices in dat file" % slices.values().__len__())
    # Get actual running values from tc.
    # Update slice totals and bandwidth. {xid: {values}}
    kernelhtbs = gethtbs(root_xid, default_xid)
    logger.verbose("bwmon: Found %s running HTBs" % kernelhtbs.keys().__len__())
    # The dat file has HTBs for slices, but the HTBs aren't running
    nohtbslices = set(slices.keys()) - set(kernelhtbs.keys())
    logger.verbose( "bwmon: Found %s slices in dat but not running." % nohtbslices.__len__())
    # Reset tc counts.
    for nohtbslice in nohtbslices:
        if live.has_key(nohtbslice):
            slices[nohtbslice].reset( {}, live[nohtbslice]['_rspec'] )
        else:
            logger.log("bwmon: Removing abondoned slice %s from dat." % nohtbslice)
            del slices[nohtbslice]
    # The dat file doesnt have HTB for the slice but kern has HTB
    slicesnodat = set(kernelhtbs.keys()) - set(slices.keys())
    logger.verbose( "bwmon: Found %s slices with HTBs but not in dat" % slicesnodat.__len__())
    for slicenodat in slicesnodat:
        # But slice is running
        if live.has_key(slicenodat):
            # init the slice. which means start accounting over since kernel
            # htb was already there.
            slices[slicenodat] = Slice(slicenodat,
                live[slicenodat]['name'],
                live[slicenodat]['_rspec'])
    # Get new slices.
    # Slices in GetSlivers but not running HTBs
    newslicesxids = set(live.keys()) - set(kernelhtbs.keys())
    logger.verbose("bwmon: Found %s new slices" % newslicesxids.__len__())
    # Setup new slices
    for newslice in newslicesxids:
        # Delegated slices dont have xids (which are uids) since they haven't been
        # instantiated yet.
        if newslice != None and live[newslice].has_key('_rspec') == True:
            # Check to see if we recently deleted this slice.
            if live[newslice]['name'] not in deaddb.keys():
                logger.log( "bwmon: new slice %s" % live[newslice]['name'] )
                # _rspec is the computed rspec: NM retrieved data from PLC, computed loans
                # and made a dict of computed values.
                slices[newslice] = Slice(newslice, live[newslice]['name'], live[newslice]['_rspec'])
                slices[newslice].reset( {}, live[newslice]['_rspec'] )
            # Double check time for dead slice in deaddb is within 24hr recording period.
            elif (time.time() <= (deaddb[live[newslice]['name']]['slice'].time + period)):
                # Resurrect the saved accounting so a delete/recreate cycle
                # cannot be used to evade the byte limits.
                deadslice = deaddb[live[newslice]['name']]
                logger.log("bwmon: Reinstantiating deleted slice %s" % live[newslice]['name'])
                slices[newslice] = deadslice['slice']
                slices[newslice].xid = newslice
                # Start the HTB
                newvals = {"maxrate": deadslice['slice'].MaxRate * 1000,
                    "minrate": deadslice['slice'].MinRate * 1000,
                    "maxexemptrate": deadslice['slice'].Maxi2Rate * 1000,
                    "usedbytes": deadslice['htb']['usedbytes'] * 1000,
                    "usedi2bytes": deadslice['htb']['usedi2bytes'],
                    "share":deadslice['htb']['share']}
                slices[newslice].reset(newvals, live[newslice]['_rspec'])
                # Bring up to date
                slices[newslice].update(newvals, live[newslice]['_rspec'])
                # Since the slice has been reinitialed, remove from dead database.
                del deaddb[deadslice['slice'].name]
                del newvals
        else:
            logger.log("bwmon: Slice %s doesn't have xid.  Skipping." % live[newslice]['name'])
    # Move dead slices that exist in the pickle file, but
    # aren't instantiated by PLC into the dead dict until
    # recording period is over.  This is to avoid the case where a slice is dynamically created
    # and destroyed then recreated to get around byte limits.
    deadxids = set(slices.keys()) - set(live.keys())
    # -2 discounts the always-present root and default slices.
    logger.verbose("bwmon: Found %s dead slices" % (deadxids.__len__() - 2))
    for deadxid in deadxids:
        if deadxid == root_xid or deadxid == default_xid:
            continue
        logger.log("bwmon: removing dead slice %s " % deadxid)
        if slices.has_key(deadxid) and kernelhtbs.has_key(deadxid):
            # add slice (by name) to deaddb
            logger.log("bwmon: Saving bandwidth totals for %s." % slices[deadxid].name)
            deaddb[slices[deadxid].name] = {'slice': slices[deadxid], 'htb': kernelhtbs[deadxid]}
            del slices[deadxid]
        if kernelhtbs.has_key(deadxid):
            logger.verbose("bwmon: Removing HTB for %s." % deadxid)
            bwlimit.off(deadxid, dev = dev_default)
    # Clean up deaddb
    for deadslice in deaddb.keys():
        if (time.time() >= (deaddb[deadslice]['slice'].time + period)):
            logger.log("bwmon: Removing dead slice %s from dat." \
                % deaddb[deadslice]['slice'].name)
            del deaddb[deadslice]
    # Get actual running values from tc since we've added and removed buckets.
    # Update slice totals and bandwidth. {xid: {values}}
    kernelhtbs = gethtbs(root_xid, default_xid)
    logger.verbose("bwmon: now %s running HTBs" % kernelhtbs.keys().__len__())
    # Update all byte limites on all slices
    for (xid, slice) in slices.iteritems():
        # Monitor only the specified slices
        if xid == root_xid or xid == default_xid: continue
        # NOTE(review): `name` is undefined in this scope (probably meant
        # slice.name); harmless today because `names` is always empty --
        # confirm before enabling name filtering.
        if names and name not in names:
            continue
        if (time.time() >= (slice.time + period)) or \
           (kernelhtbs[xid]['usedbytes'] < slice.bytes) or \
           (kernelhtbs[xid]['usedi2bytes'] < slice.i2bytes):
            # Reset to defaults every 24 hours or if it appears
            # that the byte counters have overflowed (or, more
            # likely, the node was restarted or the HTB buckets
            # were re-initialized).
            slice.reset(kernelhtbs[xid], live[xid]['_rspec'])
        elif ENABLE:
            logger.verbose("bwmon: Updating slice %s" % slice.name)
            # Update byte counts
            slice.update(kernelhtbs[xid], live[xid]['_rspec'])
    # Persist accounting state for the next run.
    logger.verbose("bwmon: Saving %s slices in %s" % (slices.keys().__len__(),DB_FILE))
    f = open(DB_FILE, "w")
    pickle.dump((version, slices, deaddb), f)
    f.close()
# doesnt use generic default interface because this runs as its own thread.
# changing the config variable will not have an effect since GetSlivers: pass
def getDefaults(nmdbcopy):
    '''
    Get defaults from default slice's slice attributes.

    Returns False (and disables all HTBs) when the node-wide default
    slice caps net_max_rate at -1, True otherwise.
    '''
    dfltslice = nmdbcopy.get(Config().PLC_SLICE_PREFIX+"_default")
    if dfltslice and dfltslice['rspec']['net_max_rate'] == -1:
        allOff()
        return False
    return True
def allOff():
    """
    Turn off all slice HTBs
    """
    # Get/set special slice IDs
    root_xid = bwlimit.get_xid("root")
    default_xid = bwlimit.get_xid("default")
    running = gethtbs(root_xid, default_xid)
    if running:
        logger.log("bwmon: Disabling all running HTBs.")
        for xid in running.keys():
            bwlimit.off(xid, dev = dev_default)
# Event used by GetSlivers()/start() to wake the bwmon worker thread.
lock = threading.Event()
def run():
    """
    When run as a thread, wait for event, lock db, deep copy it, release it,
    run bwmon.GetSlivers(), then go back to waiting.
    """
    logger.verbose("bwmon: Thread started")
    while True:
        lock.wait()
        logger.verbose("bwmon: Event received.  Running.")
        # Hold the db lock only long enough to snapshot the database.
        database.db_lock.acquire()
        nmdbcopy = copy.deepcopy(database.db)
        database.db_lock.release()
        try:
            if getDefaults(nmdbcopy) and len(bwlimit.tc("class show dev %s" % dev_default)) > 0:
                # class show to check if net:InitNodeLimit:bwlimit.init has run.
                sync(nmdbcopy)
            else: logger.log("bwmon: BW limits DISABLED.")
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt in this worker thread.
            logger.log_exc("bwmon failed")
        lock.clear()
def start(*args):
    # Launch run() as a daemon thread; *args allows use as a generic
    # plugin hook regardless of the caller's signature.
    tools.as_daemon_thread(run)
def GetSlivers(*args):
    # Dummy hook: the real work happens in run(), triggered via `lock`.
    logger.verbose ("bwmon: triggering dummy GetSlivers")
    pass
| |
from coco.contract.backends import ContainerBackend
from coco.contract.errors import ContainerBackendError, ContainerNotFoundError
from coco.core import settings
from coco.core.helpers import get_storage_backend
from coco.core.models import Container, ContainerImage, PortMapping
from coco.core.signals.signals import *
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from os import path
import time
# Resolved once at import time; all signal handlers below share this backend.
storage_backend = get_storage_backend()
def _save_port_mapping(container, internal_port):
    """
    Reserve a free external port on the container's server and persist a
    PortMapping row linking it to `internal_port` of `container`.

    :param container: The container the mapping belongs to.
    :param internal_port: The port inside the container to expose.
    :return: The saved PortMapping instance.
    """
    mapping = PortMapping(
        server=container.server,
        container=container,
        external_port=PortMapping.get_available_server_port(container.server),
        internal_port=internal_port
    )
    mapping.save()
    return mapping


def create_container_port_mappings(container):
    """
    Create the port mappings for the given container.

    :param container: The container to create the mappings for.
    :return: List of port-mapping dicts in the container backend's format.
    """
    ports = []
    # Clones inherit the image of the container they were cloned from.
    image = None
    if container.is_image_based():
        image = container.image
    elif container.is_clone() and container.clone_of.is_image_based():
        image = container.clone_of.image
    if image:
        if image.protected_port:
            # The protected port is only reachable via the server's internal IP.
            mapping = _save_port_mapping(container, image.protected_port)
            ports.append({
                ContainerBackend.PORT_MAPPING_KEY_ADDRESS: mapping.server.internal_ip,
                ContainerBackend.PORT_MAPPING_KEY_EXTERNAL: mapping.external_port,
                ContainerBackend.PORT_MAPPING_KEY_INTERNAL: mapping.internal_port
            })
        if image.public_ports:
            # Public ports are exposed on all interfaces.
            for port in image.public_ports.split(','):
                mapping = _save_port_mapping(container, port)
                ports.append({
                    ContainerBackend.PORT_MAPPING_KEY_ADDRESS: '0.0.0.0',
                    ContainerBackend.PORT_MAPPING_KEY_EXTERNAL: mapping.external_port,
                    ContainerBackend.PORT_MAPPING_KEY_INTERNAL: mapping.internal_port
                })
    return ports
@receiver(container_created)
def create_on_server(sender, container, **kwargs):
    """
    Create the newly saved container on the server's container backend.

    Reserves host ports, mounts the user's home/public/shares directories,
    then asks the backend to create the container. On backend failure the
    DB record is deleted again and the error re-raised. If the backend had
    to create an intermediate image while cloning, that image is mirrored
    into our database and attached to the container.
    """
    if container is not None:
        ports = create_container_port_mappings(container)
        clone_of = None
        cmd = None
        image = None
        if container.is_image_based():
            cmd = container.image.command
            image = container.image.backend_pk
        elif container.is_clone():
            clone_of = container.clone_of.backend_pk
            if container.clone_of.is_image_based():
                cmd = container.clone_of.image.command
        result = None
        try:
            result = container.server.get_container_backend().create_container(
                container.owner.backend_pk,
                container.owner.backend_id,
                container.name,
                ports,
                [
                    { # home directory
                        ContainerBackend.VOLUME_KEY_SOURCE: path.join(storage_backend.base_dir, settings.STORAGE_DIR_HOME),
                        ContainerBackend.VOLUME_KEY_TARGET: '/home'
                    },
                    { # public directory
                        ContainerBackend.VOLUME_KEY_SOURCE: path.join(storage_backend.base_dir, settings.STORAGE_DIR_PUBLIC),
                        ContainerBackend.VOLUME_KEY_TARGET: path.join('/data', 'public')
                    },
                    { # shares directory
                        ContainerBackend.VOLUME_KEY_SOURCE: path.join(storage_backend.base_dir, settings.STORAGE_DIR_SHARES),
                        ContainerBackend.VOLUME_KEY_TARGET: path.join('/data', 'shares')
                    }
                ],
                cmd=cmd,
                base_url=container.get_backend_base_url(),
                image=image,
                clone_of=clone_of
            )
        except ContainerBackendError as ex:
            container.delete()  # XXX: cleanup?
            raise ex
        if result.get(ContainerBackend.CONTAINER_KEY_CLONE_IMAGE, None) is None:
            container.backend_pk = result.get(ContainerBackend.KEY_PK)
        else:
            container.backend_pk = result.get(ContainerBackend.CONTAINER_KEY_CLONE_CONTAINER).get(ContainerBackend.KEY_PK)
            # an image has been created internally, add it to our DB
            # TODO: what if the base container doesn't base on an image?
            backend_image = result.get(ContainerBackend.CONTAINER_KEY_CLONE_IMAGE)
            image = ContainerImage(
                backend_pk=backend_image.get(ContainerBackend.KEY_PK),
                name=container.clone_of.image.name + '-clone-' + str(int(time.time())),
                short_description="Internal only image created during the cloning process of container %s." % container.clone_of.get_friendly_name(),
                description=container.clone_of.image.description,
                command=container.clone_of.image.command,
                protected_port=container.clone_of.image.protected_port,
                public_ports=container.clone_of.image.public_ports,
                owner=container.owner.django_user,
                is_internal=True
            )
            image.save()
            container.image = image
        container.save()
@receiver(container_deleted)
def delete_related_notifications(sender, container, **kwargs):
    """
    Delete the container's related notifications upon deletion.
    """
    if container is None:
        return
    if hasattr(container, 'related_notifications'):
        container.related_notifications.all().delete()
@receiver(container_deleted)
def delete_on_server(sender, container, **kwargs):
    """
    Delete the destroyed container on the container_backend.

    A container already gone on the backend is treated as success;
    other backend errors propagate with their original traceback.
    """
    if container is not None:
        try:
            container.server.get_container_backend().delete_container(container.backend_pk)
            # cleanup internal images
            # if container.is_image_based() and container.image.is_internal and not container.has_clones():
            #     if not Container.objects.filter(image=container.image).exists():
            #         container.image.delete()
        except ContainerNotFoundError:
            pass  # already deleted on the backend
        except ContainerBackendError:
            # XXX: restore?
            # BUGFIX: bare `raise` keeps the original traceback
            # (`raise ex` truncated it).
            raise
@receiver(container_restarted)
def restart_on_server(sender, container, **kwargs):
    """
    Restart the container on the container_backend.

    Backend errors propagate to the caller with their original traceback.
    """
    if container is not None:
        try:
            container.server.get_container_backend().restart_container(container.backend_pk)
        except ContainerBackendError:
            # BUGFIX: bare `raise` preserves the traceback (`raise ex` did not).
            raise
@receiver(container_resumed)
def resume_on_server(sender, container, **kwargs):
    """
    Resume the container on the container_backend.

    Backend errors propagate to the caller with their original traceback.
    """
    if container is not None:
        try:
            container.server.get_container_backend().resume_container(container.backend_pk)
        except ContainerBackendError:
            # BUGFIX: bare `raise` preserves the traceback (`raise ex` did not).
            raise
@receiver(container_started)
def start_on_server(sender, container, **kwargs):
    """
    Start the container on the container_backend.

    Backend errors propagate to the caller with their original traceback.
    """
    if container is not None:
        try:
            container.server.get_container_backend().start_container(container.backend_pk)
        except ContainerBackendError:
            # BUGFIX: bare `raise` preserves the traceback (`raise ex` did not).
            raise
@receiver(container_stopped)
def stop_on_server(sender, container, **kwargs):
    """
    Stop the container on the container_backend.

    Backend errors propagate to the caller with their original traceback.
    """
    if container is not None:
        try:
            container.server.get_container_backend().stop_container(container.backend_pk)
        except ContainerBackendError:
            # BUGFIX: bare `raise` preserves the traceback (`raise ex` did not).
            raise
@receiver(container_suspended)
def suspend_on_server(sender, container, **kwargs):
    """
    Suspend the container on the container_backend.

    Backend errors propagate to the caller with their original traceback.
    """
    if container is not None:
        try:
            container.server.get_container_backend().suspend_container(container.backend_pk)
        except ContainerBackendError:
            # BUGFIX: bare `raise` preserves the traceback (`raise ex` did not).
            raise
@receiver(post_delete, sender=Container)
def post_delete_handler(sender, instance, **kwargs):
    """
    Method to map Django post_delete model signals to custom ones.
    """
    # Receivers of the application-level signal get the Container as `container`.
    container_deleted.send(sender=sender, container=instance, kwargs=kwargs)
@receiver(post_save, sender=Container)
def post_save_handler(sender, instance, **kwargs):
    """
    Method to map Django post_save model signals to custom ones.

    Dispatches `container_created` on the initial INSERT and
    `container_modified` (with the updated field names) otherwise.
    """
    # `created` is only truthy on the initial save; .get() covers both
    # the missing-key and False cases in one check.
    if kwargs.get('created'):
        container_created.send(sender=sender, container=instance, kwargs=kwargs)
    else:
        container_modified.send(
            sender=sender,
            container=instance,
            fields=kwargs.get('update_fields'),
            kwargs=kwargs
        )
| |
#!/usr/bin/python
#
# Copyright (C) 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for LUBackup*"""
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import query
from testsupport import *
import testutils
class TestLUBackupPrepare(CmdlibTestCase):
  """Tests for LUBackupPrepare in local and remote export modes."""
  @patchUtils("instance_utils")
  def testPrepareLocalExport(self, utils):
    # Local export only needs the cluster secret, no certificates.
    utils.ReadOneLineFile.return_value = "cluster_secret"
    inst = self.cfg.AddNewInstance()
    op = opcodes.OpBackupPrepare(instance_name=inst.name,
                                 mode=constants.EXPORT_MODE_LOCAL)
    self.ExecOpCode(op)
  @patchUtils("instance_utils")
  def testPrepareRemoteExport(self, utils):
    # Remote export additionally creates an X509 certificate on the
    # instance's primary node.
    utils.ReadOneLineFile.return_value = "cluster_secret"
    inst = self.cfg.AddNewInstance()
    self.rpc.call_x509_cert_create.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(inst.primary_node,
                                    ("key_name",
                                     testutils.ReadTestData("cert1.pem")))
    op = opcodes.OpBackupPrepare(instance_name=inst.name,
                                 mode=constants.EXPORT_MODE_REMOTE)
    self.ExecOpCode(op)
def InstanceRemoved(remove_instance):
  """Checks whether the instance was removed during a test of opcode execution.

  Decorator factory: after the wrapped test runs, inspects the mocked rpc
  calls to decide whether disks were removed beyond those removed as part
  of snapshotting, and fails the test when that does not match
  @remove_instance.

  """
  def WrappingFunction(fn):
    def CheckingFunction(self, *args, **kwargs):
      fn(self, *args, **kwargs)
      instance_removed = (self.rpc.call_blockdev_remove.called -
                          self.rpc.call_blockdev_snapshot.called) > 0
      # self.fail() raises AssertionError itself; the previous
      # "raise self.fail(...)" was misleading dead code.
      if remove_instance and not instance_removed:
        self.fail(msg="Instance not removed when it should have been")
      if not remove_instance and instance_removed:
        self.fail(msg="Instance removed when it should not have been")
    return CheckingFunction
  return WrappingFunction
def TrySnapshots(try_snapshot):
  """Checks whether an attempt to snapshot disks should have been attempted.

  Decorator factory: after the wrapped test runs, asserts that the mocked
  blockdev_snapshot rpc was (or was not) used, matching @try_snapshot.

  """
  def WrappingFunction(fn):
    def CheckingFunction(self, *args, **kwargs):
      fn(self, *args, **kwargs)
      snapshots_tried = self.rpc.call_blockdev_snapshot.called > 0
      # self.fail() raises AssertionError itself; the previous
      # "raise self.fail(...)" was misleading dead code.
      if try_snapshot and not snapshots_tried:
        self.fail(msg="Disks should have been snapshotted but weren't")
      if not try_snapshot and snapshots_tried:
        self.fail(msg="Disks snapshotted without a need to do so")
    return CheckingFunction
  return WrappingFunction
class TestLUBackupExportBase(CmdlibTestCase):
  """Common fixture for LUBackupExport tests; stubs every rpc export uses."""
  def setUp(self):
    super(TestLUBackupExportBase, self).setUp()
    # Starting/stopping the instance succeeds.
    self.rpc.call_instance_start.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, True)
    # Disks can be assembled and torn down.
    self.rpc.call_blockdev_assemble.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, ("/dev/mock_path",
                                                  "/dev/mock_link_name",
                                                  None))
    self.rpc.call_blockdev_shutdown.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, None)
    # Snapshots succeed and can be removed again.
    self.rpc.call_blockdev_snapshot.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, ("mock_vg", "mock_id"))
    self.rpc.call_blockdev_remove.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, None)
    self.rpc.call_export_start.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, "export_daemon")
    # Import/export daemons report a clean exit for any queried node.
    def ImpExpStatus(node_uuid, name):
      return self.RpcResultsBuilder() \
               .CreateSuccessfulNodeResult(node_uuid,
                                           [objects.ImportExportStatus(
                                             exit_status=0
                                           )])
    self.rpc.call_impexp_status.side_effect = ImpExpStatus
    def ImpExpCleanup(node_uuid, name):
      return self.RpcResultsBuilder() \
               .CreateSuccessfulNodeResult(node_uuid)
    self.rpc.call_impexp_cleanup.side_effect = ImpExpCleanup
    self.rpc.call_finalize_export.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.master, None)
  def testRemoveRunningInstanceWithoutShutdown(self):
    # Removing a running instance without shutting it down must be refused.
    inst = self.cfg.AddNewInstance(admin_state=constants.ADMINST_UP)
    op = opcodes.OpBackupExport(instance_name=inst.name,
                                target_node=self.master.name,
                                shutdown=False,
                                remove_instance=True)
    self.ExecOpCodeExpectOpPrereqError(
      op, "Can not remove instance without shutting it down before")
class TestLUBackupExportLocalExport(TestLUBackupExportBase):
  """Local (intra-cluster) export scenarios."""
  def setUp(self):
    # The initial instance prep
    super(TestLUBackupExportLocalExport, self).setUp()
    self.target_node = self.cfg.AddNewNode()
    self.op = opcodes.OpBackupExport(mode=constants.EXPORT_MODE_LOCAL,
                                     target_node=self.target_node.name)
    self._PrepareInstance()
    self.rpc.call_import_start.return_value = \
      self.RpcResultsBuilder() \
        .CreateSuccessfulNodeResult(self.target_node, "import_daemon")
  def _PrepareInstance(self, online=False, snapshottable=True):
    """Produces an instance for export tests, and updates the opcode.

    online -- create the instance in ADMINST_UP (running) state
    snapshottable -- use a disk template supporting snapshots (plain);
                     otherwise file-based, which cannot be snapshotted
    """
    if online:
      admin_state = constants.ADMINST_UP
    else:
      admin_state = constants.ADMINST_DOWN
    if snapshottable:
      disk_template = constants.DT_PLAIN
    else:
      disk_template = constants.DT_FILE
    inst = self.cfg.AddNewInstance(admin_state=admin_state,
                                   disk_template=disk_template)
    self.op = self.CopyOpCode(self.op, instance_name=inst.name)
  @TrySnapshots(True)
  @InstanceRemoved(False)
  def testPlainExportWithShutdown(self):
    self._PrepareInstance(online=True)
    self.ExecOpCode(self.op)
  @TrySnapshots(False)
  @InstanceRemoved(False)
  def testFileExportWithShutdown(self):
    # A running file-based instance can't export without --long-sleep.
    self._PrepareInstance(online=True, snapshottable=False)
    self.ExecOpCodeExpectOpExecError(self.op, ".*--long-sleep option.*")
  @TrySnapshots(False)
  @InstanceRemoved(False)
  def testFileLongSleepExport(self):
    self._PrepareInstance(online=True, snapshottable=False)
    op = self.CopyOpCode(self.op, long_sleep=True)
    self.ExecOpCode(op)
  @TrySnapshots(True)
  @InstanceRemoved(False)
  def testPlainLiveExport(self):
    self._PrepareInstance(online=True)
    op = self.CopyOpCode(self.op, shutdown=False)
    self.ExecOpCode(op)
  @TrySnapshots(False)
  @InstanceRemoved(False)
  def testFileLiveExport(self):
    # Live export requires snapshot support.
    self._PrepareInstance(online=True, snapshottable=False)
    op = self.CopyOpCode(self.op, shutdown=False)
    self.ExecOpCodeExpectOpExecError(op, ".*live export.*")
  @TrySnapshots(False)
  @InstanceRemoved(False)
  def testPlainOfflineExport(self):
    self._PrepareInstance(online=False)
    self.ExecOpCode(self.op)
  @TrySnapshots(False)
  @InstanceRemoved(False)
  def testFileOfflineExport(self):
    self._PrepareInstance(online=False, snapshottable=False)
    self.ExecOpCode(self.op)
  @TrySnapshots(False)
  @InstanceRemoved(True)
  def testExportRemoveOfflineInstance(self):
    self._PrepareInstance(online=False)
    op = self.CopyOpCode(self.op, remove_instance=True)
    self.ExecOpCode(op)
  @TrySnapshots(False)
  @InstanceRemoved(True)
  def testExportRemoveOnlineInstance(self):
    self._PrepareInstance(online=True)
    op = self.CopyOpCode(self.op, remove_instance=True)
    self.ExecOpCode(op)
  @TrySnapshots(False)
  @InstanceRemoved(False)
  def testValidCompressionTool(self):
    op = self.CopyOpCode(self.op, compress="lzop")
    self.cfg.SetCompressionTools(["gzip", "lzop"])
    self.ExecOpCode(op)
  @InstanceRemoved(False)
  def testInvalidCompressionTool(self):
    op = self.CopyOpCode(self.op, compress="invalid")
    self.cfg.SetCompressionTools(["gzip", "lzop"])
    self.ExecOpCodeExpectOpPrereqError(op, "Compression tool not allowed")
  def testLiveLongSleep(self):
    # --long-sleep contradicts a live (no-shutdown) export.
    op = self.CopyOpCode(self.op, shutdown=False, long_sleep=True)
    self.ExecOpCodeExpectOpPrereqError(op, ".*long sleep.*")
class TestLUBackupExportRemoteExport(TestLUBackupExportBase):
  """Remote (inter-cluster, X509-encrypted) export scenarios."""
  def setUp(self):
    super(TestLUBackupExportRemoteExport, self).setUp()
    self.inst = self.cfg.AddNewInstance()
    self.op = opcodes.OpBackupExport(mode=constants.EXPORT_MODE_REMOTE,
                                     instance_name=self.inst.name,
                                     target_node=[],
                                     x509_key_name=["mock_key_name"],
                                     destination_x509_ca="mock_dest_ca")
  @InstanceRemoved(False)
  def testRemoteExportWithoutX509KeyName(self):
    op = self.CopyOpCode(self.op, x509_key_name=self.REMOVE)
    self.ExecOpCodeExpectOpPrereqError(op,
                                       "Missing X509 key name for encryption")
  @InstanceRemoved(False)
  def testRemoteExportWithoutX509DestCa(self):
    op = self.CopyOpCode(self.op, destination_x509_ca=self.REMOVE)
    self.ExecOpCodeExpectOpPrereqError(op,
                                       "Missing destination X509 CA")
# Standard Ganeti test entry point.
if __name__ == "__main__":
  testutils.GanetiTestProgram()
| |
## python 2.7
"""
treeTable: A dict-derived TREE data structure in Python
Created on Jun 21, 2016
@author: jayventi
"""
#import datetime
from json import dumps, loads
class TreeTableNode(object):
    """
    Tree node object: holds everything needed to maintain a tree
    (id, name, parent link, child-id list and a free-form content dict).
    """
    def __init__(self, node_id, name, parent_id=None, content=None, children=None):
        """Create a node; `content` defaults to {} and `children` to []."""
        if children is None:
            children = []
        if content is None:
            content = {}
        self.node_id = node_id      # unique integer id within the tree
        self.name = name            # display / lookup name
        self.content = content      # arbitrary payload dict
        self.children = children    # list of child node_ids (not node objects)
        self.parent_id = parent_id  # id of the parent node (root is its own parent)

    def __repr__(self):
        return '{} : {}'.format(self.name, str(self.node_id))

    def add_child_id(self, child_id):
        """Append a child's node_id to this node's children list."""
        # Direct append; the old aliasing through a temp list was a no-op.
        self.children.append(child_id)

    def full_repr(self):
        """Return a verbose dump: name, parent, child ids and sorted content."""
        # BUGFIX: the label used to be misspelled 'chindren'.
        child_str = ' children: ['
        if self.children:
            sep = ''
            for child in self.children:
                child_str += sep + str(child)
                sep = ','
        child_str += ']'
        content_str = ' '
        if self.content:
            for key in sorted(self.content):
                content_str += '\t' + key + ':\t' + str(self.content[key]) + '\n'
        return self.name + "\t parent:" + str(self.parent_id) + child_str + "\t" + content_str

    def update(self, new_content):
        """Merge new_content into this node's content (existing keys overwritten)."""
        self.content.update(new_content)
class TreeTable(object):
"""
Main tree structure stores node in a dict as values and node_id as keys
self.name is used as a index vaule self._name_to_node_id is the index hash
"""
    def __init__(self, name='defaultTree', treeid=1, parentTreeid=None):
        # Tree identity; `parentTreeid` is accepted but currently unused here.
        self.treeid = treeid
        self.name = name
        # Monotonically increasing id source for new nodes.
        self.lastnode_id = 0
        self._node_table = {}       # node_id -> TreeTableNode
        self._name_to_node_id = {}  # name index: name -> node_id
        # Every tree starts with a placeholder root node (node_id 1, its own parent).
        self.add_child('root', 1)
def add_child(self, name, parent_id, content=None):
""" Adds a child node, main mechanism of building a tree"""
nextnode_id = self.lastnode_id + 1
new_node = TreeTableNode(nextnode_id, name, parent_id, content, [])
self._node_table[nextnode_id] = new_node
self.lastnode_id = nextnode_id
# add child to parent node's chaiedren list
parentNode = self._node_table[parent_id]
if nextnode_id != parent_id: # not root node
parentNode.add_child_id(nextnode_id)
self._node_table[parent_id] = parentNode
self._name_to_node_id[name] = nextnode_id
def set_root_name(self, name, content={}):
"""
Tets the roots name and content
when a TreeTable is created one root node is generated
and name set to 'root'. having a root node with name
root means that it has never been initialized to any
meaningful content and is the definition of a created but
uninitialized tree. This special routine is used to
set the root node to some specific name.
This is the only function which changes the name
of a node after it has been created.
"""
root_node_id = 1
root_node = self._node_table[root_node_id]
root_node.name = name
root_node.content = content
self._node_table[root_node_id] = root_node
self._name_to_node_id[name] = root_node_id
self._name_to_node_id.pop("root", None)
def add_children(self, parent_id, children={}):
""" adds a list of node_ids of children nodes to a node"""
for child in children:
self.add_child(child.name, parent_id, child.content)
def table_dump(self):
print(self._node_table)
def table_full_dump(self):
for each_nd in self._node_table:
text = 'node_id: {} {} \n'.format(str(each_nd), self._node_table[each_nd].full_repr())
return text
def get_children(self, node_id):
temp_node = self._node_table[node_id]
return temp_node.children
def get_node_by_name(self, name):
try:
node_id = self._name_to_node_id[name]
return self._node_table[node_id]
except:
node_id = None
return None
def get_node_by_id(self, node_id):
return self._node_table[node_id]
def getnode_idByName(self, name):
return self._name_to_node_id[name]
def is_node_by_name(self, name):
if name in self._name_to_node_id:
return True
else:
return False
def is_root_set(self):
if 'root' in self._name_to_node_id:
return False
else:
return True
def up_date_node(self, node_id, update_content):
"""" Updates notes content by node_id"""
tempNode = self._node_table[node_id]
tempNode.update(update_content)
self._node_table[node_id] = tempNode
def up_date_node_by_name(self, name, update_content):
"""" Updates notes content by nodes name"""
node_id = self._name_to_node_id[name]
tempNode = self._node_table[node_id]
tempNode.update(update_content)
self._node_table[node_id] = tempNode
def node_count(self):
return len(self._node_table)
def pretty_tree_table(self):
""""
An implementation of printing tree using Stack Print
tree structure in hierarchy style.
For example:
Root
|___ 1
| |___ 2
| |___ 4
| |___ 5
|___ 3
|___ 6
| |___ 7
Uses Stack structure, push and pop nodes with additional level info.
"""
print ('Start pretty_tree_table')
level = str(0)
node_id = 1
stack_of_nodes = [node_id, level] # init stack_of_nodes
nodes_to_dpdate = {level: node_id} # for walk prossess
while stack_of_nodes:
# head_id pointer points to the first item of stack, can be a level identifier or tree node_id
head_id = stack_of_nodes.pop()
if isinstance(head_id, str):
level = head_id # move towards the root up a level
else:
head_node = self._node_table[head_id] # move tword the leaf dowen a level
self.__print_label__(head_node, stack_of_nodes, level, self.__basic_lable__)
children = head_node.children
children.reverse()
if stack_of_nodes:
# push level info seens a head_id was just pop from stack_of_nodes, the level at now
stack_of_nodes.append(level)
nodes_to_dpdate[level] = head_id
if children: # add children if has children nodes
stack_of_nodes.extend(children)
level = str(1 + int(level))
stack_of_nodes.append(level)
def __print_label__(self, head_node, stack_of_nodes, level_str, label_fun):
"""
Print a each node as a line with branch marks
"""
leading = ' '
lasting = '|___ '
label = label_fun(head_node)
level = int(level_str)
if level == 0:
print (leading + label)
else:
for l in range(0, level - 1):
sibling = False
parent_id = self.__get_parent_id__(head_node.node_id, level - l)
parentN = self._node_table[parent_id]
for c in parentN.children:
if c in stack_of_nodes:
sibling = True
break
if sibling:
leading += '| '
else:
leading += ' '
if label.strip() != '-':
print('{0}{1}{2}'.format(leading, lasting, label))
def __basic_lable__(self, head_node):
return str(head_node.node_id)+' - ' + str(head_node.name)
def __get_parent_id__(self, head_id, level_up):
while level_up:
parentnode = self._node_table[head_id]
head_id = parentnode.parent_id
level_up -= 1
return head_id
def get_tree_stuff(self, head_id=1):
"""stuff summer"""
summed_stuff = 1 # get stuff noraly 0 for inner
innerNode = self._node_table[head_id]
for childnode_id in innerNode.children: # hase children
childNode = self._node_table[childnode_id]
if childNode.children: # if childNode has children follow
summed_stuff += self.get_tree_stuff(childnode_id)
else:
stuff = 1 # get stuff
summed_stuff += stuff
# childNode.content = Stuff
print('leaf node: ' + str(childNode)+" stuff: "+str(stuff))
# innerNode.content = summed_stuff
print('inner node: ' + str(head_id)+" stuff: "+str(summed_stuff))
return summed_stuff
def to_json(self):
""" Sterilizes to json contents of the TreeTable"""
to_json_dict = {}
for node_id in self._node_table:
to_json_dict[node_id] = self._node_table[node_id].__dict__
return dumps(to_json_dict)
def from_json(self, from_json):
""" Reconstitutes a tree object from a sterilized json of the same type"""
from_dict = loads(from_json)
if len(from_dict) < 1:
return 0
for jsonnode_id, json_node in from_dict.items():
working_node = TreeTableNode(json_node['node_id'], json_node['name'],
json_node['parent_id'], json_node['content'],
json_node['children']
)
self._node_table[int(jsonnode_id)] = working_node
return self.node_count()
| |
from TCAction import TCActionBase
from NativeLog import NativeLog
import time
import random
import string
TEST_COUNT_ONE_ROUND = 500
class TestCase(TCActionBase.CommonTCActionBase):
    """Chained SoftAP/station TCP stress test (Python 2 / SSC framework).

    Topology built in execute(): SSC2 joins SSC1's SoftAP; SSC3..SSC(2+sta_number)
    join SSC2's SoftAP. TCP servers/clients are created along both hops and
    data is sent/received in rounds until the configured duration elapses.
    """
    def __init__(self, test_case, test_env, timeout=45, log_path=TCActionBase.LOG_PATH):
        TCActionBase.CommonTCActionBase.__init__(self, test_case, test_env,
                                                 timeout=timeout, log_path=log_path)
        # Default parameters; may be overridden by the "cmd set" sheet below.
        self.send_len = 1460
        self.server_port = random.randint(10000, 50000)
        self.server_port_2 = random.randint(10000, 50000)
        self.server_echo = True
        self.test_time = 12 * 60
        self.sta_number = 3
        self.send_delay = 50
        # load param from excel
        cmd_set = test_case["cmd set"]
        for i in range(1, len(cmd_set)):
            if cmd_set[i][0] != "dummy":
                # NOTE(review): executes "self.<expr>" strings taken from the
                # test-case sheet -- assumes the sheet content is trusted.
                cmd_string = "self." + cmd_set[i][0]
                exec cmd_string
        self.result_cntx = TCActionBase.ResultCheckContext(self, test_env, self.tc_name)
        pass
    def cleanup(self):
        """Re-enable receive printing on all targets after the test."""
        checker_stings = []
        test_action_strings = []
        for i in range(self.sta_number + 2):
            checker_stings.append("R SSC%s C +RECVPRINT:1" % (i+1))
            test_action_strings.append("SSC SSC%s soc -R -o 1" % (i+1))
        fail_string = "Fail, Fail to turn on recv print"
        self.load_and_exe_one_step(checker_stings, test_action_strings, fail_string)
        pass
    def execute(self):
        """Build the two-hop topology, loop send/recv, then rebuild sockets."""
        TCActionBase.TCActionBase.execute(self)
        self.result_cntx.start()
        try:
            # configurable params
            send_len = self.send_len
            test_time = self.test_time * 60
            # server port
            server_port = self.server_port
            server_port_t = self.server_port_2
            # ap ip
            # ap_ip = self.ap_ip
            # server echo
            server_echo = self.server_echo
            # station number
            sta_number = self.sta_number
            # send delay
            send_delay = self.send_delay
            # configurable params
        except StandardError, e:
            NativeLog.add_trace_critical("Error configuration for TCPTransparent script, error is %s" % e)
            raise StandardError("Error configuration")
        # step0 reboot
        checker_stings = []
        test_action_string = []
        for i in range(sta_number + 2):
            checker_stings.append("P SSC%d C !!!ready!!!" % (i + 1))
            test_action_string.append("SSCC SSC%d reboot" % (i + 1))
        fail_string = "Fail, Fail to reboot"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            return
        # switch off recv print
        checker_stings = []
        test_action_strings = []
        for i in range(self.sta_number + 2):
            checker_stings.append("R SSC%s C +RECVPRINT:0" % (i+1))
            test_action_strings.append("SSC SSC%s soc -R -o 0" % (i+1))
        fail_string = "Fail, Fail to turn off recv print"
        self.load_and_exe_one_step(checker_stings, test_action_strings, fail_string)
        # step1, set ap/STA mode on all target
        for i in range(sta_number + 2):
            checker_stings = ["R SSC%d C +MODE:OK" % (i + 1)]
            test_action_string = ["SSCC SSC%d op -S -o 3" % (i + 1)]
            fail_string = "Fail, Fail to set mode on SSC%d" % (i + 1)
            if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
                return
        # set different getway for SSC1 softAP
        checker_stings = ["R SSC1 C +DHCP:AP,OK"]
        test_action_string = ["SSCC SSC1 dhcp -E -o 2"]
        fail_string = "Fail, SSC1 Fail to disable DHCP"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            return
        checker_stings = ["R SSC1 C +IP:OK"]
        test_action_string = ["SSCC SSC1 ip -S -o 2 -i 192.168.6.1"]
        fail_string = "Fail, SSC1 Fail to set IP"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            return
        checker_stings = ["R SSC1 C +DHCP:AP,OK"]
        test_action_string = ["SSCC SSC1 dhcp -S -o 2"]
        fail_string = "Fail, SSC1 Fail to enable DHCP"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            return
        # set different getway for SSC2 softAP
        checker_stings = ["R SSC2 C +DHCP:AP,OK"]
        test_action_string = ["SSCC SSC2 dhcp -E -o 2"]
        fail_string = "Fail, SSC2 Fail to disable DHCP"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            return
        checker_stings = ["R SSC2 C +IP:OK"]
        test_action_string = ["SSCC SSC2 ip -S -o 2 -i 192.168.5.1"]
        fail_string = "Fail, SSC2 Fail to set IP"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            return
        checker_stings = ["R SSC2 C +DHCP:AP,OK"]
        test_action_string = ["SSCC SSC2 dhcp -S -o 2"]
        fail_string = "Fail, SSC2 Fail to enable DHCP"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            return
        # step2, set ssid/password on SSC1
        # random lowercase ssid/password to avoid collisions between runs
        ssid = "".join([random.choice(string.lowercase) for m in range(10)])
        password = "".join([random.choice(string.lowercase) for m in range(10)])
        checker_stings = ["R SSC1 C +SAP:OK"]
        test_action_string = ["SSCC SSC1 ap -S -s %s -p %s -n 10 -t 0 -m 8" % (ssid, password)]
        fail_string = "Fail, Fail to set ssid/password on SSC1"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            return
        # step3, set ssid/password on SSC2
        ssid_1 = "".join([random.choice(string.lowercase) for m in range(10)])
        password_1 = "".join([random.choice(string.lowercase) for m in range(10)])
        checker_stings = ["R SSC2 C +SAP:OK"]
        test_action_string = ["SSCC SSC2 ap -S -s %s -p %s -n 10 -t 0 -m 8" % (ssid_1, password_1)]
        fail_string = "Fail, Fail to set ap ssid/password on SSC2"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            return
        # step4, SSC2 join SSC1(soft AP)
        checker_stings = []
        test_action_string = []
        checker_stings.append("P SSC2 C +JAP:CONNECTED,%s" % ssid)
        test_action_string.append("SSCC SSC2 ap -C -s %s -p %s" % (ssid, password))
        fail_string = "Fail, Fail to connect to SSC1 SoftAP"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            return
        # step5, create server on SSC1
        checker_stings = ["R SSC1 A <server_sock>:BIND:(\d+),OK"]
        test_action_string = ["SSCC SSC1 soc -B -t TCP -p %s" % server_port]
        fail_string = "Fail, Fail to create server on SSC1 while binding"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            return
        checker_stings = ["R SSC1 RE LISTEN:(\d+),OK"]
        test_action_string = ["SSCC SSC1 soc -L -s <server_sock>"]
        fail_string = "Fail, Fail to create server on SSC1 while listening"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            return
        # step6, create client on SSC2
        checker_stings = []
        test_action_string = []
        checker_stings.append("P SSC2 A <client_sock>:BIND:(\d+),OK")
        test_action_string.append("SSCC SSC2 soc -B -t TCP")
        fail_string = "Fail, SSC2 Fail to connect to server while binding"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            return
        checker_stings = ["P SSC2 RE CONNECT:(\d+),OK", "P SSC1 A <accept_sock>:ACCEPT:(\d+),.+"]
        test_action_string = ["SSCC SSC2 soc -C -s <client_sock> -i %s -p %s" % ("192.168.6.1", server_port)]
        fail_string = "Fail, SSC2 Fail to connect to server while connecting"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            return
        # step7, SSC3 - SSC5 join SSC2
        checker_stings = []
        test_action_string = []
        # NOTE(review): fail_string is reassigned on every loop pass, so only
        # the last station's message is used by the single check below.
        for i in range(sta_number):
            checker_stings.append("P SSC%d C +JAP:CONNECTED,%s" % (i + 3, ssid_1))
            test_action_string.append("SSCC SSC%d ap -C -s %s -p %s" % (i + 3, ssid_1, password_1))
            fail_string = "Fail, SSC%d Fail to connect to SSC2" % (i + 3)
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string, check_time=450) is False:
            return
        # step8, create server on SSC2
        checker_stings = ["R SSC2 A <server_sock_t>:BIND:(\d+),OK"]
        test_action_string = ["SSCC SSC2 soc -B -t TCP -p %s -i 192.168.5.1" % server_port_t]
        fail_string = "Fail, Fail to create server one SSC2 while binding"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            return
        checker_stings = ["R SSC2 RE LISTEN:(\d+),OK"]
        test_action_string = ["SSCC SSC2 soc -L -s <server_sock_t>"]
        fail_string = "Fail, Fail to create server one SSC2 while listening"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            return
        # step9, create client on SSC3 - SSC5
        checker_stings = []
        test_action_string = []
        for i in range(sta_number):
            checker_stings.append("P SSC%d A <client_sock%d>:BIND:(\d+),OK" % (i + 3, i + 3))
            test_action_string.append("SSCC SSC%d soc -B -t TCP" % (i + 3))
        fail_string = "Fail, Fail to connect to SSC2 server while binding"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            return
        for i in range(sta_number):
            checker_stings = ["P SSC%d RE CONNECT:(\d+),OK" % (i + 3),
                              "P SSC2 A <accept_sock%d>:ACCEPT:(\d+),.+" % (i + 3)]
            test_action_string = ["SSCC SSC%d soc -C -s <client_sock%d> -i %s -p %s" %
                                  (i + 3, i + 3, "192.168.5.1", server_port_t)]
            fail_string = "Fail, Fail to connect to SSC2 server while connecting"
            if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
                return
        start_time = time.time()
        # step 10, do send/recv
        # Each round sends TEST_COUNT_ONE_ROUND packets on every socket and
        # checks for +SEND:...,OK while asserting the socket never CLOSED.
        while time.time() - start_time < test_time:
            checker_stings = []
            test_action_string = []
            if server_echo is True:
                test_action_string.append("SSC SSC1 soc -S -s <accept_sock> -l %d -n %d -j %d" %
                                          (send_len, TEST_COUNT_ONE_ROUND, send_delay))
                checker_stings.append("P SSC1 RE \+SEND:\d+,OK NC CLOSED")
                test_action_string.append("SSC SSC2 soc -S -s <server_sock> -l %d -n %d -j %d" %
                                          (send_len, TEST_COUNT_ONE_ROUND, send_delay))
                checker_stings.append("P SSC2 RE \+SEND:\d+,OK NC CLOSED")
            for i in range(sta_number):
                checker_stings.append("P SSC%d RE \+SEND:\d+,OK NC CLOSED" % (i + 3))
                test_action_string.append("SSC SSC%d soc -S -s <client_sock%d> -l %d -n %d -j %d" %
                                          (i + 3, i + 3, send_len, TEST_COUNT_ONE_ROUND, send_delay))
            for i in range(sta_number):
                test_action_string.append("SSC SSC2 soc -S -s <accept_sock%d> -l %d -n %d -j %d" %
                                          (i + 3, send_len, TEST_COUNT_ONE_ROUND, send_delay))
                checker_stings.append("P SSC2 RE \+SEND:\d+,OK NC CLOSED")
            fail_string = "Fail, Failed to send/recv data"
            if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string,
                                          check_freq=1, check_time=300) is False:
                break
            pass
        NativeLog.add_prompt_trace("time escape: %s" % (time.time() - start_time))
        # Success only if the loop above ran for the whole configured duration.
        if (time.time() - start_time) > test_time:
            self.result_cntx.set_result("Succeed")
        else:
            self.result_cntx.set_result("Failed")
        # close all sockets and rebuild the connections once more
        checker_stings = []
        test_action_string = []
        for i in range(sta_number + 2):
            checker_stings.append("P SSC%d C CLOSEALL" % (i + 1))
            test_action_string.append("SSCC SSC%d soc -T" % (i + 1))
        fail_string = "Fail, Fail to close socket"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            return
        # re-set server on SSC1
        server_port = random.randint(20000, 30000)
        checker_stings = ["R SSC1 A <server_sock>:BIND:(\d+),OK"]
        test_action_string = ["SSCC SSC1 soc -B -t TCP -p %s" % server_port]
        fail_string = "Fail, Fail to bind socket"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            return
        checker_stings = ["R SSC1 RE LISTEN:(\d+),OK"]
        test_action_string = ["SSCC SSC1 soc -L -s <server_sock>"]
        fail_string = "Fail, Fail to listen"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            return
        # SSC2 connnect SSC1
        checker_stings = []
        test_action_string = []
        checker_stings.append("P SSC2 A <client_sock>:BIND:(\d+),OK")
        test_action_string.append("SSCC SSC2 soc -B -t TCP")
        fail_string = "Fail, SSC2 Fail to bind sock"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            return
        checker_stings = ["P SSC2 RE CONNECT:(\d+),OK", "P SSC1 A <accept_sock>:ACCEPT:(\d+),.+"]
        test_action_string = ["SSCC SSC2 soc -C -s <client_sock> -i %s -p %s" % ("192.168.6.1", server_port)]
        fail_string = "Fail, SSC2 Fail to connect to SSC1 server"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            return
        # create server on SSC2
        checker_stings = []
        test_action_string = []
        checker_stings.append("P SSC2 A <server_sock>:BIND:(\d+),OK")
        test_action_string.append("SSCC SSC2 soc -B -t TCP -p %s -i 192.168.5.1" % server_port_t)
        fail_string = "Fail, SSC2 Fail to bind"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            return
        checker_stings = ["R SSC2 RE LISTEN:(\d+),OK"]
        test_action_string = ["SSCC SSC2 soc -L -s <server_sock>"]
        fail_string = "Fail, SSC2 Fail to listen"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            return
        # create client on SSC3-SSC5
        checker_stings = []
        test_action_string = []
        for i in range(sta_number):
            checker_stings.append("P SSC%d A <client_sock%d>:BIND:(\d+),OK" % (i + 3, i + 3))
            test_action_string.append("SSCC SSC%d soc -B -t TCP" % (i + 3))
        fail_string = "Fail, Fail to connect to SSC2 server while binding"
        if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
            return
        for i in range(sta_number):
            checker_stings = ["P SSC%d RE CONNECT:(\d+),OK" % (i + 3),
                              "P SSC2 A <accept_sock%d>:ACCEPT:(\d+),.+" % (i + 3)]
            test_action_string = ["SSCC SSC%d soc -C -s <client_sock%d> -i %s -p %s" %
                                  (i + 3, i + 3, "192.168.5.1", server_port_t)]
            fail_string = "Fail, Fail to connect to server"
            if self.load_and_exe_one_step(checker_stings, test_action_string, fail_string) is False:
                return
    def result_check(self, port_name, data):
        """Forward raw port data to the base checker and the result context."""
        TCActionBase.CommonTCActionBase.result_check(self, port_name, data)
        self.result_cntx.append_data(port_name, data)
def main():
    """Entry point placeholder; the TC framework drives TestCase directly."""
    return None
# Standard script guard; running this module standalone is a no-op.
if __name__ == '__main__':
    main()
| |
# -*- coding: utf-8 -*-
'''
Managing python installations with pyenv
======================================
This module is used to install and manage python installations with pyenv.
Different versions of python can be installed, and uninstalled. pyenv will
be installed automatically the first time it is needed and can be updated
later. This module will *not* automatically install packages which pyenv
will need to compile the versions of python.
If pyenv is run as the root user then it will be installed to /usr/local/pyenv,
otherwise it will be installed to the users ~/.pyenv directory. To make
pyenv available in the shell you may need to add the pyenv/shims and pyenv/bin
directories to the users PATH. If you are installing as root and want other
users to be able to access pyenv then you will need to add PYENV_ROOT to
their environment.
This is how a state configuration could look like:
.. code-block:: yaml
pyenv-deps:
pkg.installed:
- pkgs:
- make
- build-essential
- libssl-dev
- zlib1g-dev
- libbz2-dev
- libreadline-dev
- libsqlite3-dev
- wget
- curl
- llvm
python-2.6:
pyenv.absent:
- require:
- pkg: pyenv-deps
python-2.7.6:
pyenv.installed:
- default: True
- require:
- pkg: pyenv-deps
'''
# Import python libs
import re
# Import salt libs
import salt.utils
def _check_pyenv(ret, user=None):
    '''
    Check to see if pyenv is installed, marking the state dict as failed
    when it is not.
    '''
    installed = __salt__['pyenv.is_installed'](user)
    if not installed:
        ret.update(result=False, comment='pyenv is not installed.')
    return ret
def _python_installed(ret, python, user=None):
    '''
    Check whether the given python version is already installed under pyenv;
    on a hit, also record whether it is the current default.
    '''
    default = __salt__['pyenv.default'](runas=user)
    if python in __salt__['pyenv.versions'](user):
        ret['result'] = True
        ret['comment'] = 'Requested python exists.'
        ret['default'] = default == python
    return ret
def _check_and_install_python(ret, python, default=False, user=None):
    '''
    Verify that python is installed, installing it if unavailable.

    Bug fix: the original returned immediately after a *successful* install
    as well, skipping the trailing block that makes the freshly installed
    version the pyenv default -- so ``default: True`` was silently ignored
    for new installs. Only the failure path returns early now.
    '''
    ret = _python_installed(ret, python, user=user)
    if not ret['result']:
        if __salt__['pyenv.install_python'](python, runas=user):
            ret['result'] = True
            ret['changes'][python] = 'Installed'
            ret['comment'] = 'Successfully installed python'
            ret['default'] = default
        else:
            ret['result'] = False
            ret['comment'] = 'Could not install python.'
            return ret
    if default:
        __salt__['pyenv.default'](python, runas=user)
    return ret
def installed(name, default=False, runas=None, user=None):
    '''
    Verify that the specified python is installed with pyenv. pyenv is
    installed if necessary.

    name
        The version of python to install

    default : False
        Whether to make this python the default.

    runas: None
        The user to run pyenv as.
        .. deprecated:: 0.17.0

    user: None
        The user to run pyenv as.
        .. versionadded:: 0.17.0

    .. versionadded:: 0.16.0
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}

    # Reconcile the deprecated 'runas' argument with 'user'.
    if user is not None and runas is not None:
        # user wins over runas but let warn about the deprecation.
        ret.setdefault('warnings', []).append(
            'Passed both the \'runas\' and \'user\' arguments. Please don\'t. '
            '\'runas\' is being ignored in favor of \'user\'.'
        )
        runas = None
    elif runas is not None:
        # Support old runas usage
        user = runas
        runas = None

    # Accept state ids of the form 'python-X.Y.Z'.
    if name.startswith('python-'):
        name = re.sub(r'^python-', '', name)

    if __opts__['test']:
        ret['comment'] = 'python {0} is set to be installed'.format(name)
        return ret

    ret = _check_pyenv(ret, user)
    if ret['result'] is False:
        if not __salt__['pyenv.install'](user):
            ret['comment'] = 'pyenv failed to install'
            return ret
    # pyenv is now available either way; install the requested python.
    return _check_and_install_python(ret, name, default, user=user)
def _check_and_uninstall_python(ret, python, user=None):
    '''
    Verify that python is uninstalled, removing it when present.
    '''
    ret = _python_installed(ret, python, user=user)
    # Guard clause: nothing to do when the version is not installed.
    if not ret['result']:
        ret['result'] = True
        ret['comment'] = 'python {0} is already absent'.format(python)
        return ret
    # Point the default at 'system' before removing the default version.
    if ret['default']:
        __salt__['pyenv.default']('system', runas=user)
    if __salt__['pyenv.uninstall_python'](python, runas=user):
        ret['result'] = True
        ret['changes'][python] = 'Uninstalled'
        ret['comment'] = 'Successfully removed python'
    else:
        ret['result'] = False
        ret['comment'] = 'Failed to uninstall python'
    return ret
def absent(name, runas=None, user=None):
    '''
    Verify that the specified python is not installed with pyenv. pyenv
    is installed if necessary.
    name
        The version of python to uninstall
    runas: None
        The user to run pyenv as.
        .. deprecated:: 0.17.0
    user: None
        The user to run pyenv as.
        .. versionadded:: 0.17.0
    .. versionadded:: 0.16.0
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
    # Developer reminder only (_dont_call_warnings=True): drop 'runas'
    # support entirely once the Hydrogen release is reached.
    salt.utils.warn_until(
        'Hydrogen',
        'Please remove \'runas\' support at this stage. \'user\' support was '
        'added in 0.17.0',
        _dont_call_warnings=True
    )
    if runas:
        # Warn users about the deprecation
        ret.setdefault('warnings', []).append(
            'The \'runas\' argument is being deprecated in favor of \'user\', '
            'please update your state files.'
        )
    if user is not None and runas is not None:
        # user wins over runas but let warn about the deprecation.
        ret.setdefault('warnings', []).append(
            'Passed both the \'runas\' and \'user\' arguments. Please don\'t. '
            '\'runas\' is being ignored in favor of \'user\'.'
        )
        runas = None
    elif runas is not None:
        # Support old runas usage
        user = runas
        runas = None
    # Accept state ids of the form 'python-X.Y.Z'.
    if name.startswith('python-'):
        name = re.sub(r'^python-', '', name)
    if __opts__['test']:
        ret['comment'] = 'python {0} is set to be uninstalled'.format(name)
        return ret
    ret = _check_pyenv(ret, user)
    if ret['result'] is False:
        # Without pyenv the version cannot be installed either; succeed.
        ret['result'] = True
        ret['comment'] = 'pyenv not installed, {0} not either'.format(name)
        return ret
    else:
        return _check_and_uninstall_python(ret, name, user=user)
def install_pyenv(name, user=None):
    '''
    Install pyenv if not installed. Allows you to require pyenv be installed
    prior to installing the plugins. Useful if you want to install pyenv
    plugins via the git or file modules and need them installed before
    installing any pythons.

    Use the pyenv.root configuration option to set the path for pyenv if you
    want a system wide install that is not in a user home dir.

    user: None
        The user to run pyenv as.
    '''
    ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
    if __opts__['test']:
        ret['comment'] = 'pyenv is set to be installed'
        return ret
    # Bug fix: the original called _check_and_install_python(ret, user),
    # passing the *user* as the python version to install. This state is
    # about pyenv itself, so check for / install pyenv directly.
    if __salt__['pyenv.is_installed'](user):
        ret['result'] = True
        ret['comment'] = 'pyenv is already installed'
    elif __salt__['pyenv.install'](user):
        ret['result'] = True
        ret['changes']['pyenv'] = 'Installed'
        ret['comment'] = 'Successfully installed pyenv'
    else:
        ret['result'] = False
        ret['comment'] = 'pyenv failed to install'
    return ret
| |
# Copyright 2022 The ML Collections Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for ml_collections.ConfigDict."""
import abc
from collections import abc as collections_abc
import functools
import json
import pickle
import sys
from absl.testing import absltest
from absl.testing import parameterized
import ml_collections
from ml_collections.config_dict import config_dict
import mock
import six
import yaml
# Payload wrapped in FieldReference values by _get_test_dict() below.
_TEST_FIELD = {'int': 0}
# Baseline dictionary used by most ConfigDict tests: scalar, string, list
# and nested-dict fields.
_TEST_DICT = {
    'float': 2.34,
    'string': 'tom',
    'int': 2,
    'list': [1, 2],
    'dict': {
        'float': -1.23,
        'int': 23
    },
}
def _test_function():
  """Module-level no-op used as a callable value in serialization tests."""
# Having ABCMeta as a metaclass shouldn't break yaml serialization.
class _TestClass(six.with_metaclass(abc.ABCMeta, object)):
  def __init__(self):
    # Plain attributes give best-effort serialization something to dump.
    self.variable_1 = 1
    self.variable_2 = '2'
# Module-level instance placed in the "best effort" test dict below.
_test_object = _TestClass()
class _TestClassNoStr:
  """Bare placeholder class used as an unserializable value in tests."""
# _TEST_DICT extended with values JSON/YAML cannot serialize directly;
# exercises the "best effort" serialization paths.
_TEST_DICT_BEST_EFFORT = dict(_TEST_DICT)
_TEST_DICT_BEST_EFFORT.update({
    'unserializable': _TestClass,
    'unserializable_no_str': _TestClassNoStr,
    'function': _test_function,
    'object': _test_object,
    'set': {1, 2, 3}
})
# This is how we expect _TEST_DICT to look after renaming the key 'float'
# to 'double' using configdict.recursive_rename.
_TEST_DICT_CHANGE_FLOAT_NAME = {
    'double': 2.34,
    'string': 'tom',
    'int': 2,
    'list': [1, 2],
    'dict': {
        'double': -1.23,
        'int': 23
    },
}
def _get_test_dict():
  """Return a copy of _TEST_DICT plus one FieldReference aliased as 'ref' and 'ref2'."""
  shared_ref = ml_collections.FieldReference(_TEST_FIELD)
  return dict(_TEST_DICT, ref=shared_ref, ref2=shared_ref)
def _get_test_dict_best_effort():
  """Return a copy of _TEST_DICT_BEST_EFFORT plus one FieldReference aliased twice."""
  shared_ref = ml_collections.FieldReference(_TEST_FIELD)
  return dict(_TEST_DICT_BEST_EFFORT, ref=shared_ref, ref2=shared_ref)
def _get_test_config_dict():
  """Build a ConfigDict from the reference test dictionary."""
  source = _get_test_dict()
  return ml_collections.ConfigDict(source)
def _get_test_config_dict_best_effort():
  """Build a ConfigDict from the best-effort (unserializable-laden) test dictionary."""
  source = _get_test_dict_best_effort()
  return ml_collections.ConfigDict(source)
# Expected JSON serialization (keys sorted) of _get_test_config_dict().
_JSON_TEST_DICT = ('{"dict": {"float": -1.23, "int": 23},'
                   ' "float": 2.34,'
                   ' "int": 2,'
                   ' "list": [1, 2],'
                   ' "ref": {"int": 0},'
                   ' "ref2": {"int": 0},'
                   ' "string": "tom"}')
# YAML type tags and the unserializable-object message differ between
# Python 2 and Python 3.
if six.PY2:
  _DICT_TYPE = "!!python/name:__builtin__.dict ''"
  _UNSERIALIZABLE_MSG = "unserializable object of type: <type 'classobj'>"
else:
  _DICT_TYPE = "!!python/name:builtins.dict ''"
  _UNSERIALIZABLE_MSG = (
      "unserializable object: <class '__main__._TestClassNoStr'>")
# Type tags substituted into the _REPR_TEST_DICT template below.
_TYPES = {
    'dict_type': _DICT_TYPE,
    'configdict_type': '!!python/object:ml_collections.config_dict.config_dict'
                       '.ConfigDict',
    'fieldreference_type': '!!python/object:ml_collections.config_dict'
                           '.config_dict.FieldReference'
}
# Expected best-effort JSON output: unserializable values are replaced by
# descriptive strings, sets become lists, objects become attribute dicts.
_JSON_BEST_EFFORT_TEST_DICT = (
    '{"dict": {"float": -1.23, "int": 23},'
    ' "float": 2.34,'
    ' "function": "function _test_function",'
    ' "int": 2,'
    ' "list": [1, 2],'
    ' "object": {"variable_1": 1, "variable_2": "2"},'
    ' "ref": {"int": 0},'
    ' "ref2": {"int": 0},'
    ' "set": [1, 2, 3],'
    ' "string": "tom",'
    ' "unserializable": "unserializable object: '
    '<class \'__main__._TestClass\'>",'
    ' "unserializable_no_str": "%s"}') % _UNSERIALIZABLE_MSG
# Expected repr() output; '&id001'/'*id001' are YAML anchors showing that
# 'ref' and 'ref2' share one FieldReference.
_REPR_TEST_DICT = """
dict:
  float: -1.23
  int: 23
float: 2.34
int: 2
list:
- 1
- 2
ref: &id001 {fieldreference_type}
  _field_type: {dict_type}
  _ops: []
  _required: false
  _value:
    int: 0
ref2: *id001
string: tom
""".format(**_TYPES)
# Expected str() output (flow-style YAML, references resolved to values).
_STR_TEST_DICT = """
dict: {float: -1.23, int: 23}
float: 2.34
int: 2
list: [1, 2]
ref: &id001 {int: 0}
ref2: *id001
string: tom
"""
# Expected str() output for a config with nested dicts and nested lists.
_STR_NESTED_TEST_DICT = """
dict: {float: -1.23, int: 23}
float: 2.34
int: 2
list: [1, 2]
nested_dict:
  float: -1.23
  int: 23
  nested_dict:
    float: -1.23
    int: 23
    non_nested_dict: {float: -1.23, int: 23}
nested_list:
- 1
- 2
- [3, 4, 5]
- 6
ref: &id001 {int: 0}
ref2: *id001
string: tom
"""
class ConfigDictTest(parameterized.TestCase):
"""Tests ConfigDict in config flags library."""
  def assertEqualConfigs(self, cfg, dictionary):
    """Asserts recursive equality of config and a dictionary.

    Comparison goes through cfg.to_dict(), so nested ConfigDicts are
    converted back to plain dicts before the equality check.
    """
    self.assertEqual(cfg.to_dict(), dictionary)
def testCreating(self):
"""Tests basic config creation."""
cfg = ml_collections.ConfigDict()
cfg.field = 2.34
self.assertEqual(cfg.field, 2.34)
def testDir(self):
"""Test that dir() works correctly on config."""
cfg = ml_collections.ConfigDict()
cfg.field = 2.34
self.assertIn('field', dir(cfg))
self.assertIn('lock', dir(cfg))
def testFromDictConstruction(self):
"""Tests creation of config from existing dictionary."""
cfg = ml_collections.ConfigDict(_TEST_DICT)
self.assertEqualConfigs(cfg, _TEST_DICT)
def testOverridingValues(self):
"""Tests basic values overriding."""
cfg = ml_collections.ConfigDict()
cfg.field = 2.34
self.assertEqual(cfg.field, 2.34)
cfg.field = -2.34
self.assertEqual(cfg.field, -2.34)
def testDictAttributeTurnsIntoConfigDict(self):
"""Tests that dicts in a ConfigDict turn to ConfigDicts (recursively)."""
cfg = ml_collections.ConfigDict(_TEST_DICT)
# Test conversion to dict on creation.
self.assertIsInstance(cfg.dict, ml_collections.ConfigDict)
# Test conversion to dict on setting attribute.
new_dict = {'inside_dict': {'inside_key': 0}}
cfg.new_dict = new_dict
self.assertIsInstance(cfg.new_dict, ml_collections.ConfigDict)
self.assertIsInstance(cfg.new_dict.inside_dict, ml_collections.ConfigDict)
self.assertEqual(cfg.new_dict.to_dict(), new_dict)
  def testOverrideExceptions(self):
    """Test the `int` and unicode-string exceptions to overriding.
    ConfigDict forces strong type-checking with two exceptions. The first is
    that `int` values can be stored to fields of type `float`. And secondly,
    all string types can be stored in fields of type `str` or `unicode`.
    """
    cfg = ml_collections.ConfigDict()
    # Test that overriding 'float' fields with int works.
    cfg.float_field = 2.34
    cfg.float_field = 2
    self.assertEqual(cfg.float_field, 2.0)
    # Test that overriding with Unicode strings works.
    cfg.string_field = '42'
    cfg.string_field = u'42'
    self.assertEqual(cfg.string_field, '42')
    # Test that overriding a Unicode field with a `str` type works.
    cfg.unicode_string_field = u'42'
    cfg.unicode_string_field = '42'
    self.assertEqual(cfg.unicode_string_field, u'42')
    # Test that overriding a list with a tuple works.
    cfg.tuple_field = [1, 2, 3]
    cfg.tuple_field = (1, 2)
    self.assertEqual(cfg.tuple_field, [1, 2])
    # Test that overriding a tuple with a list works.
    cfg.list_field = [23, 42]
    cfg.list_field = (8, 9, 10)
    self.assertEqual(cfg.list_field, [8, 9, 10])
    # Test that int <-> long conversions work.
    int_value = 1
    # In Python 2, int(very large number) returns a long
    long_value = int(1e100)
    cfg.int_field = int_value
    cfg.int_field = long_value
    self.assertEqual(cfg.int_field, long_value)
    # `long` exists only on Python 2; on Python 3 everything is `int`.
    if sys.version_info.major == 2:
      expected = long
    else:
      expected = int
    self.assertIsInstance(cfg.int_field, expected)
    cfg.long_field = long_value
    cfg.long_field = int_value
    self.assertEqual(cfg.long_field, int_value)
    self.assertIsInstance(cfg.long_field, expected)
def testOverrideCallable(self):
    """Test that overriding a callable field with any other callable works."""

    class SomeClass:

        def __init__(self, x, power=1):
            self.y = x**power

        def factory(self, x):
            return SomeClass(self.y + x)

    # One callable of each flavor: class, lambda, partial, bound method.
    callables = [
        SomeClass,
        lambda x: SomeClass(x, power=2),
        functools.partial(SomeClass, power=3),
        SomeClass(4.0).factory,
    ]
    cfg = ml_collections.ConfigDict()
    # Every flavor must be able to overwrite every other flavor.
    for previous in callables:
        for replacement in callables:
            cfg.fn_field = previous
            cfg.fn_field = replacement
            self.assertEqual(cfg.fn_field, replacement)
def testOverrideFieldReference(self):
"""Test overriding with FieldReference objects."""
cfg = ml_collections.ConfigDict()
cfg.field_1 = 'field_1'
cfg.field_2 = 'field_2'
# Override using a FieldReference.
cfg.field_1 = ml_collections.FieldReference('override_1')
# Override FieldReference field using another FieldReference.
cfg.field_1 = ml_collections.FieldReference('override_2')
# Override using empty FieldReference.
cfg.field_2 = ml_collections.FieldReference(None, field_type=str)
# Override FieldReference field using string.
cfg.field_2 = 'field_2'
# Check a TypeError is raised when using FieldReference's with wrong type.
with self.assertRaises(TypeError):
cfg.field_2 = ml_collections.FieldReference(1)
with self.assertRaises(TypeError):
cfg.field_2 = ml_collections.FieldReference(None, field_type=int)
def testTypeSafe(self):
    """Tests type safe checking."""
    cfg = _get_test_config_dict()
    # A str cannot be assigned to a float field.
    with self.assertRaisesRegex(TypeError, 'field \'float\''):
        cfg.float = 'tom'
    # Test that float cannot be assigned to int.
    with self.assertRaisesRegex(TypeError, 'field \'int\''):
        cfg.int = 12.8
    # An int cannot be assigned to a string field.
    with self.assertRaisesRegex(TypeError, 'field \'string\''):
        cfg.string = -123
    # Type checking also applies inside nested ConfigDicts.
    with self.assertRaisesRegex(TypeError, 'field \'float\''):
        cfg.dict.float = 'string'
    # Ensure None is ignored by type safety
    cfg.string = None
    cfg.string = 'tom'
def testIgnoreType(self):
    """Tests that ignore_type() suspends type checking, including nested."""
    cfg = ml_collections.ConfigDict({
        'string': 'This is a string',
        'float': 3.0,
        'list': [ml_collections.ConfigDict({'float': 1.0})],
        'tuple': [ml_collections.ConfigDict({'float': 1.0})],
        'dict': {
            'float': 1.0
        }
    })
    # Inside the context manager, type-mismatched assignments succeed at the
    # top level and within nested containers and dicts.
    with cfg.ignore_type():
        cfg.string = -123
        cfg.float = 'string'
        cfg.list[0].float = 'string'
        cfg.tuple[0].float = 'string'
        cfg.dict.float = 'string'
def testTypeUnsafe(self):
"""Tests lack of type safe checking."""
cfg = ml_collections.ConfigDict(_get_test_dict(), type_safe=False)
cfg.float = 'tom'
cfg.string = -123
cfg.int = 12.8
def testLocking(self):
    """Tests lock mechanism."""
    cfg = ml_collections.ConfigDict()
    cfg.field = 2
    cfg.dict_field = {'float': 1.23, 'integer': 3}
    cfg.ref = ml_collections.FieldReference(
        ml_collections.ConfigDict({'integer': 0}))
    cfg.lock()
    # Overwriting an existing field is still allowed on a locked dict.
    cfg.field = -4
    # Adding new fields is not, at any nesting level...
    with self.assertRaises(AttributeError):
        cfg.new_field = 2
    with self.assertRaises(AttributeError):
        cfg.dict_field.new_field = -1.23
    # ...including through a FieldReference to a nested ConfigDict.
    with self.assertRaises(AttributeError):
        cfg.ref.new_field = 1
    # Deleting fields is also forbidden while locked.
    with self.assertRaises(AttributeError):
        del cfg.field
def testUnlocking(self):
"""Tests unlock mechanism."""
cfg = ml_collections.ConfigDict()
cfg.dict_field = {'float': 1.23, 'integer': 3}
cfg.ref = ml_collections.FieldReference(
ml_collections.ConfigDict({'integer': 0}))
cfg.lock()
with cfg.unlocked():
cfg.new_field = 2
cfg.dict_field.new_field = -1.23
cfg.ref.new_field = 1
def testGetMethod(self):
"""Tests get method."""
cfg = _get_test_config_dict()
self.assertEqual(cfg.get('float', -1), cfg.float)
self.assertEqual(cfg.get('ref', -1), cfg.ref)
self.assertEqual(cfg.get('another_key', -1), -1)
self.assertIsNone(cfg.get('another_key'))
def testItemsMethod(self):
"""Tests items method."""
cfg = _get_test_config_dict()
self.assertEqual(dict(**cfg), dict(cfg.items()))
items = cfg.items()
self.assertEqual(len(items), len(_get_test_dict()))
for entry in _TEST_DICT.items():
if isinstance(entry[1], dict):
entry = (entry[0], ml_collections.ConfigDict(entry[1]))
self.assertIn(entry, items)
self.assertIn(('ref', cfg.ref), items)
self.assertIn(('ref2', cfg.ref2), items)
ind_ref = items.index(('ref', cfg.ref))
ind_ref2 = items.index(('ref2', cfg.ref2))
self.assertIs(items[ind_ref][1], items[ind_ref2][1])
cfg = ml_collections.ConfigDict()
self.assertEqual(dict(**cfg), dict(cfg.items()))
# Test that items are sorted
self.assertEqual(sorted(dict(**cfg).items()), cfg.items())
def testGetItemRecursively(self):
"""Tests getting items recursively (e.g., config['a.b'])."""
cfg = _get_test_config_dict()
self.assertEqual(cfg['dict.float'], -1.23)
self.assertEqual('%(dict.int)i' % cfg, '23')
def testIterItemsMethod(self):
"""Tests iteritems method."""
cfg = _get_test_config_dict()
self.assertEqual(dict(**cfg), dict(cfg.iteritems()))
cfg = ml_collections.ConfigDict()
self.assertEqual(dict(**cfg), dict(cfg.iteritems()))
def testIterKeysMethod(self):
"""Tests iterkeys method."""
some_dict = {'x1': 32, 'x2': 5.2, 'x3': 'str'}
cfg = ml_collections.ConfigDict(some_dict)
self.assertEqual(set(six.iterkeys(some_dict)), set(six.iterkeys(cfg)))
# Test that keys are sorted
for k_ref, k in zip(sorted(six.iterkeys(cfg)), six.iterkeys(cfg)):
self.assertEqual(k_ref, k)
def testKeysMethod(self):
"""Tests keys method."""
some_dict = {'x1': 32, 'x2': 5.2, 'x3': 'str'}
cfg = ml_collections.ConfigDict(some_dict)
self.assertEqual(set(some_dict.keys()), set(cfg.keys()))
# Test that keys are sorted
for k_ref, k in zip(sorted(cfg.keys()), cfg.keys()):
self.assertEqual(k_ref, k)
def testLenMethod(self):
    """Tests len() support (__len__)."""
    some_dict = {'x1': 32, 'x2': 5.2, 'x3': 'str'}
    cfg = ml_collections.ConfigDict(some_dict)
    self.assertLen(cfg, len(some_dict))
def testIterValuesMethod(self):
"""Tests itervalues method."""
some_dict = {'x1': 32, 'x2': 5.2, 'x3': 'str'}
cfg = ml_collections.ConfigDict(some_dict)
self.assertEqual(set(six.itervalues(some_dict)), set(six.itervalues(cfg)))
# Test that items are sorted
for k_ref, v in zip(sorted(six.iterkeys(cfg)), six.itervalues(cfg)):
self.assertEqual(cfg[k_ref], v)
def testValuesMethod(self):
"""Tests values method."""
some_dict = {'x1': 32, 'x2': 5.2, 'x3': 'str'}
cfg = ml_collections.ConfigDict(some_dict)
self.assertEqual(set(some_dict.values()), set(cfg.values()))
# Test that items are sorted
for k_ref, v in zip(sorted(cfg.keys()), cfg.values()):
self.assertEqual(cfg[k_ref], v)
def testIterValuesResolvesReferences(self):
"""Tests itervalues FieldReference resolution."""
cfg = ml_collections.ConfigDict({'x1': 32, 'x2': 5.2, 'x3': 'str'})
ref = ml_collections.FieldReference(0)
cfg['x4'] = ref
for v in cfg.itervalues():
self.assertNotIsInstance(v, ml_collections.FieldReference)
self.assertIn(ref, cfg.itervalues(preserve_field_references=True))
def testValuesResolvesReferences(self):
"""Tests values FieldReference resolution."""
cfg = ml_collections.ConfigDict({'x1': 32, 'x2': 5.2, 'x3': 'str'})
ref = ml_collections.FieldReference(0)
cfg['x4'] = ref
for v in cfg.values():
self.assertNotIsInstance(v, ml_collections.FieldReference)
self.assertIn(ref, cfg.values(preserve_field_references=True))
def testIterItemsResolvesReferences(self):
"""Tests iteritems FieldReference resolution."""
cfg = ml_collections.ConfigDict({'x1': 32, 'x2': 5.2, 'x3': 'str'})
ref = ml_collections.FieldReference(0)
cfg['x4'] = ref
for _, v in cfg.iteritems():
self.assertNotIsInstance(v, ml_collections.FieldReference)
self.assertIn(('x4', ref), cfg.iteritems(preserve_field_references=True))
def testItemsResolvesReferences(self):
"""Tests items FieldReference resolution."""
cfg = ml_collections.ConfigDict({'x1': 32, 'x2': 5.2, 'x3': 'str'})
ref = ml_collections.FieldReference(0)
cfg['x4'] = ref
for _, v in cfg.items():
self.assertNotIsInstance(v, ml_collections.FieldReference)
self.assertIn(('x4', ref), cfg.items(preserve_field_references=True))
def testEquals(self):
    """Tests __eq__ and __ne__ methods."""
    some_dict = {
        'float': 1.23,
        'integer': 3,
        'list': [1, 2],
        'dict': {
            'a': {},
            'b': 'string'
        }
    }
    cfg = ml_collections.ConfigDict(some_dict)
    cfg_other = ml_collections.ConfigDict(some_dict)
    self.assertEqual(cfg, cfg_other)
    self.assertEqual(ml_collections.ConfigDict(cfg), cfg_other)
    # A differing value breaks equality; restoring it re-establishes it.
    cfg_other.float = 3
    self.assertNotEqual(cfg, cfg_other)
    cfg_other.float = cfg.float
    cfg_other.list = ['a', 'b']
    self.assertNotEqual(cfg, cfg_other)
    cfg_other.list = cfg.list
    # Lock state participates in equality.
    cfg_other.lock()
    self.assertNotEqual(cfg, cfg_other)
    cfg_other.unlock()
    # Type-safety mode participates in equality as well.
    cfg_other = ml_collections.ConfigDict(some_dict, type_safe=False)
    self.assertNotEqual(cfg, cfg_other)
    cfg = ml_collections.ConfigDict(some_dict)
    # References that have the same id should be equal (even if self-references)
    cfg_other = ml_collections.ConfigDict(some_dict)
    cfg_other.me = cfg
    cfg.me = cfg
    self.assertEqual(cfg, cfg_other)
    cfg = ml_collections.ConfigDict(some_dict)
    cfg.me = cfg
    self.assertEqual(cfg, cfg)
    # Self-references that do not have the same id loop infinitely
    cfg_other = ml_collections.ConfigDict(some_dict)
    cfg_other.me = cfg_other
    # Temporarily disable coverage trace while testing runtime is exceeded
    # (the tracer would slow the comparison loop before it hits its limit).
    trace_func = sys.gettrace()
    sys.settrace(None)
    with self.assertRaises(RuntimeError):
        cfg == cfg_other  # pylint:disable=pointless-statement
    sys.settrace(trace_func)
def testEqAsConfigDict(self):
"""Tests .eq_as_configdict() method."""
cfg_1 = _get_test_config_dict()
cfg_2 = _get_test_config_dict()
cfg_2.added_field = 3.14159
cfg_self_ref = _get_test_config_dict()
cfg_self_ref.self_ref = cfg_self_ref
frozen_cfg_1 = ml_collections.FrozenConfigDict(cfg_1)
frozen_cfg_2 = ml_collections.FrozenConfigDict(cfg_2)
self.assertTrue(cfg_1.eq_as_configdict(cfg_1))
self.assertTrue(cfg_1.eq_as_configdict(frozen_cfg_1))
self.assertTrue(frozen_cfg_1.eq_as_configdict(cfg_1))
self.assertTrue(frozen_cfg_1.eq_as_configdict(frozen_cfg_1))
self.assertFalse(cfg_1.eq_as_configdict(cfg_2))
self.assertFalse(cfg_1.eq_as_configdict(frozen_cfg_2))
self.assertFalse(frozen_cfg_1.eq_as_configdict(cfg_self_ref))
self.assertFalse(frozen_cfg_1.eq_as_configdict(frozen_cfg_2))
self.assertFalse(cfg_self_ref.eq_as_configdict(cfg_1))
def testHash(self):
    """ConfigDict must be unhashable, and advertise itself as such."""
    cfg = ml_collections.ConfigDict({'float': 1.23, 'integer': 3})
    with self.assertRaisesRegex(TypeError, 'unhashable type'):
        hash(cfg)
    # Ensure Python realizes ConfigDict is not hashable.
    self.assertNotIsInstance(cfg, collections_abc.Hashable)
def testDidYouMeanFeature(self):
    """Tests 'did you mean' suggestions.

    Reading, deleting or (while locked) setting a misspelled field must raise
    an AttributeError whose message suggests the close match 'learning_rate'.
    """
    cfg = ml_collections.ConfigDict()
    cfg.learning_rate = 0.01
    cfg.lock()
    # Reading a misspelled field suggests the close match.
    with self.assertRaisesRegex(AttributeError,
                                'Did you mean.*learning_rate.*'):
        _ = cfg.laerning_rate
    # Deleting a misspelled field suggests it too (unlocked, so the lock
    # itself is not what raises).
    with cfg.unlocked():
        with self.assertRaisesRegex(AttributeError,
                                    'Did you mean.*learning_rate.*'):
            del cfg.laerning_rate
    # Setting a new (misspelled) field on a locked dict also suggests it.
    with self.assertRaisesRegex(AttributeError,
                                'Did you mean.*learning_rate.*'):
        cfg.laerning_rate = 0.02
    self.assertEqual(cfg.learning_rate, 0.01)
    # BUG FIX: this previously read `self.laerning_rate` (the TestCase
    # instance), which raises AttributeError trivially and never exercises
    # ConfigDict. Read the misspelled key from `cfg` instead.
    with self.assertRaises(AttributeError):
        _ = cfg.laerning_rate
def testReferences(self):
"""Tests assigning references in the dict."""
cfg = _get_test_config_dict()
cfg.dict_ref = cfg.dict
self.assertEqual(cfg.dict_ref, cfg.dict)
def testPreserveReferences(self):
"""Tests that initializing with another ConfigDict preserves references."""
cfg = _get_test_config_dict()
# In the original, "ref" and "ref2" are the same FieldReference
self.assertIs(cfg.get_ref('ref'), cfg.get_ref('ref2'))
# Create a copy from the original
cfg2 = ml_collections.ConfigDict(cfg)
# If the refs had not been preserved, get_ref would create a new
# reference for each call
self.assertIs(cfg2.get_ref('ref'), cfg2.get_ref('ref2'))
self.assertIs(cfg2.ref, cfg2.ref2) # the values are also the same object
def testUnpacking(self):
    """Tests ability to pass ConfigDict instance with ** operator."""

    def add_three(x):
        return x + 3

    cfg = ml_collections.ConfigDict()
    cfg.x = 2
    # **cfg expands the stored field as the keyword argument `x`.
    self.assertEqual(add_three(**cfg), 5)
def testUnpackingWithFieldReference(self):
    """Tests ability to pass ConfigDict instance with ** operator."""

    def add_three(x):
        return x + 3

    cfg = ml_collections.ConfigDict()
    cfg.x = ml_collections.FieldReference(2)
    # ** unpacking resolves the FieldReference to its underlying value.
    self.assertEqual(add_three(**cfg), 5)
def testReadingIncorrectField(self):
    """Tests whether accessing non-existing fields raises an exception."""
    cfg = ml_collections.ConfigDict()
    # Item access on a missing key raises KeyError...
    with self.assertRaises(KeyError):
        _ = cfg['non_existing_field']
    # ...while attribute access raises AttributeError.
    with self.assertRaises(AttributeError):
        _ = cfg.non_existing_field
def testIteration(self):
    """Tests whether one can iterate over ConfigDict."""
    cfg = ml_collections.ConfigDict()
    for idx in range(10):
        cfg[f'field{idx}'] = f'field{idx}'
    # Iterating yields keys; each key must resolve identically via item
    # access and attribute access.
    for key in cfg:
        self.assertEqual(getattr(cfg, key), cfg[key])
def testDeYaml(self):
"""Tests YAML deserialization."""
cfg = _get_test_config_dict()
deyamled = yaml.load(cfg.to_yaml(), yaml.UnsafeLoader)
self.assertEqual(cfg, deyamled)
def testJSONConversion(self):
"""Tests JSON serialization."""
cfg = _get_test_config_dict()
self.assertEqual(
cfg.to_json(sort_keys=True).strip(), _JSON_TEST_DICT.strip())
cfg = _get_test_config_dict_best_effort()
with self.assertRaises(TypeError):
cfg.to_json()
def testJSONConversionCustomEncoder(self):
"""Tests JSON serialization with custom encoder."""
cfg = _get_test_config_dict()
encoder = json.JSONEncoder()
mock_encoder_cls = mock.MagicMock()
mock_encoder_cls.return_value = encoder
with mock.patch.object(encoder, 'default') as mock_default:
mock_default.return_value = ''
cfg.to_json(json_encoder_cls=mock_encoder_cls)
mock_default.assert_called()
def testJSONConversionBestEffort(self):
"""Tests JSON serialization."""
# Check that best effort option doesn't break default functionality
cfg = _get_test_config_dict()
self.assertEqual(
cfg.to_json_best_effort(sort_keys=True).strip(),
_JSON_TEST_DICT.strip())
cfg_best_effort = _get_test_config_dict_best_effort()
self.assertEqual(
cfg_best_effort.to_json_best_effort(sort_keys=True).strip(),
_JSON_BEST_EFFORT_TEST_DICT.strip())
def testReprConversion(self):
"""Tests repr conversion."""
cfg = _get_test_config_dict()
self.assertEqual(repr(cfg).strip(), _REPR_TEST_DICT.strip())
def testLoadFromRepr(self):
cfg_dict = ml_collections.ConfigDict()
field = ml_collections.FieldReference(1)
cfg_dict.r1 = field
cfg_dict.r2 = field
cfg_load = yaml.load(repr(cfg_dict), yaml.UnsafeLoader)
# Test FieldReferences are preserved
cfg_load['r1'].set(2)
self.assertEqual(cfg_load['r1'].get(), cfg_load['r2'].get())
def testStrConversion(self):
    """Tests conversion to str."""
    cfg = _get_test_config_dict()
    # Verify str(cfg) doesn't raise errors.
    _ = str(cfg)
    # Build a deeply nested structure to exercise multi-level rendering.
    test_dict_2 = _get_test_dict()
    test_dict_2['nested_dict'] = {
        'float': -1.23,
        'int': 23,
        'nested_dict': {
            'float': -1.23,
            'int': 23,
            'non_nested_dict': {
                'float': -1.23,
                'int': 233,
            },
        },
        'nested_list': [1, 2, [3, 44, 5], 6],
    }
    cfg_2 = ml_collections.ConfigDict(test_dict_2)
    # Demonstrate that dot-access works.
    cfg_2.nested_dict.nested_dict.non_nested_dict.int = 23
    cfg_2.nested_dict.nested_list[2][1] = 4
    # Verify str(cfg) doesn't raise errors.
    _ = str(cfg_2)
def testDotInField(self):
    """Tests trying to create a dot containing field."""
    cfg = ml_collections.ConfigDict()
    # '.' is used for nested access (e.g. cfg['a.b']), so keys containing a
    # dot must be rejected.
    with self.assertRaises(ValueError):
        cfg['invalid.name'] = 2.3
def testToDictConversion(self):
"""Tests whether references are correctly handled when calling to_dict."""
cfg = ml_collections.ConfigDict()
field = ml_collections.FieldReference('a string')
cfg.dict = {
'float': 2.3,
'integer': 1,
'field_ref1': field,
'field_ref2': field
}
cfg.ref = cfg.dict
cfg.self_ref = cfg
pure_dict = cfg.to_dict()
self.assertEqual(type(pure_dict), dict)
self.assertIs(pure_dict, pure_dict['self_ref'])
self.assertIs(pure_dict['dict'], pure_dict['ref'])
# Ensure ConfigDict has been converted to dict.
self.assertEqual(type(pure_dict['dict']), dict)
# Ensure FieldReferences are not preserved, by default.
self.assertNotIsInstance(pure_dict['dict']['field_ref1'],
ml_collections.FieldReference)
self.assertNotIsInstance(pure_dict['dict']['field_ref2'],
ml_collections.FieldReference)
self.assertEqual(pure_dict['dict']['field_ref1'], field.get())
self.assertEqual(pure_dict['dict']['field_ref2'], field.get())
pure_dict_with_refs = cfg.to_dict(preserve_field_references=True)
self.assertEqual(type(pure_dict_with_refs), dict)
self.assertEqual(type(pure_dict_with_refs['dict']), dict)
self.assertIsInstance(pure_dict_with_refs['dict']['field_ref1'],
ml_collections.FieldReference)
self.assertIsInstance(pure_dict_with_refs['dict']['field_ref2'],
ml_collections.FieldReference)
self.assertIs(pure_dict_with_refs['dict']['field_ref1'],
pure_dict_with_refs['dict']['field_ref2'])
# Ensure FieldReferences in the dict are not the same as the FieldReferences
# in the original ConfigDict.
self.assertIsNot(pure_dict_with_refs['dict']['field_ref1'],
cfg.dict['field_ref1'])
def testToDictTypeUnsafe(self):
"""Tests interaction between ignore_type() and to_dict()."""
cfg = ml_collections.ConfigDict()
cfg.string = ml_collections.FieldReference(None, field_type=str)
with cfg.ignore_type():
cfg.string = 1
self.assertEqual(1, cfg.to_dict(preserve_field_references=True)['string'])
def testCopyAndResolveReferences(self):
"""Tests the .copy_and_resolve_references() method."""
cfg = ml_collections.ConfigDict()
field = ml_collections.FieldReference('a string')
int_field = ml_collections.FieldReference(5)
cfg.dict = {
'float': 2.3,
'integer': 1,
'field_ref1': field,
'field_ref2': field,
'field_ref_int1': int_field,
'field_ref_int2': int_field + 5,
'placeholder': config_dict.placeholder(str),
'cfg': ml_collections.ConfigDict({
'integer': 1,
'int_field': int_field
})
}
cfg.ref = cfg.dict
cfg.self_ref = cfg
cfg_resolved = cfg.copy_and_resolve_references()
for field, value in [('float', 2.3), ('integer', 1),
('field_ref1', 'a string'), ('field_ref2', 'a string'),
('field_ref_int1', 5), ('field_ref_int2', 10),
('placeholder', None)]:
self.assertEqual(getattr(cfg_resolved.dict, field), value)
for field, value in [('integer', 1), ('int_field', 5)]:
self.assertEqual(getattr(cfg_resolved.dict.cfg, field), value)
self.assertIs(cfg_resolved, cfg_resolved['self_ref'])
self.assertIs(cfg_resolved['dict'], cfg_resolved['ref'])
def testCopyAndResolveReferencesConfigTypes(self):
"""Tests that .copy_and_resolve_references() handles special types."""
cfg_type_safe = ml_collections.ConfigDict()
int_field = ml_collections.FieldReference(5)
cfg_type_safe.field_ref1 = int_field
cfg_type_safe.field_ref2 = int_field + 5
cfg_type_safe.lock()
cfg_type_safe_locked_resolved = cfg_type_safe.copy_and_resolve_references()
self.assertTrue(cfg_type_safe_locked_resolved.is_locked)
self.assertTrue(cfg_type_safe_locked_resolved.is_type_safe)
cfg = ml_collections.ConfigDict(type_safe=False)
cfg.field_ref1 = int_field
cfg.field_ref2 = int_field + 5
cfg_resolved = cfg.copy_and_resolve_references()
self.assertFalse(cfg_resolved.is_locked)
self.assertFalse(cfg_resolved.is_type_safe)
cfg.lock()
cfg_locked_resolved = cfg.copy_and_resolve_references()
self.assertTrue(cfg_locked_resolved.is_locked)
self.assertFalse(cfg_locked_resolved.is_type_safe)
for resolved in [
cfg_type_safe_locked_resolved, cfg_resolved, cfg_locked_resolved
]:
self.assertEqual(resolved.field_ref1, 5)
self.assertEqual(resolved.field_ref2, 10)
frozen_cfg = ml_collections.FrozenConfigDict(cfg_type_safe)
frozen_cfg_resolved = frozen_cfg.copy_and_resolve_references()
for resolved in [frozen_cfg, frozen_cfg_resolved]:
self.assertEqual(resolved.field_ref1, 5)
self.assertEqual(resolved.field_ref2, 10)
self.assertIsInstance(resolved, ml_collections.FrozenConfigDict)
def testInitConfigDict(self):
"""Tests initializing a ConfigDict on a ConfigDict."""
cfg = _get_test_config_dict()
cfg_2 = ml_collections.ConfigDict(cfg)
self.assertIsNot(cfg_2, cfg)
self.assertIs(cfg_2.float, cfg.float)
self.assertIs(cfg_2.dict, cfg.dict)
# Ensure ConfigDict fields are initialized as is
dict_with_cfg_field = {'cfg': cfg}
cfg_3 = ml_collections.ConfigDict(dict_with_cfg_field)
self.assertIs(cfg_3.cfg, cfg)
# Now ensure it works with locking and type_safe
cfg_4 = ml_collections.ConfigDict(cfg, type_safe=False)
cfg_4.lock()
self.assertEqual(cfg_4, ml_collections.ConfigDict(cfg_4))
def testInitReferenceStructure(self):
"""Ensures initialization preserves reference structure."""
x = [1, 2, 3]
self_ref_dict = {
'float': 2.34,
'test_dict_1': _TEST_DICT,
'test_dict_2': _TEST_DICT,
'list': x
}
self_ref_dict['self'] = self_ref_dict
self_ref_dict['self_fr'] = ml_collections.FieldReference(self_ref_dict)
self_ref_cd = ml_collections.ConfigDict(self_ref_dict)
self.assertIs(self_ref_cd.test_dict_1, self_ref_cd.test_dict_2)
self.assertIs(self_ref_cd, self_ref_cd.self)
self.assertIs(self_ref_cd, self_ref_cd.self_fr)
self.assertIs(self_ref_cd.list, x)
self.assertEqual(self_ref_cd, self_ref_cd.self)
self_ref_cd.self.int = 1
self.assertEqual(self_ref_cd.int, 1)
self_ref_cd_2 = ml_collections.ConfigDict(self_ref_cd)
self.assertIsNot(self_ref_cd_2, self_ref_cd)
self.assertIs(self_ref_cd_2.self, self_ref_cd_2)
self.assertIs(self_ref_cd_2.test_dict_1, self_ref_cd.test_dict_1)
def testInitFieldReference(self):
"""Tests initialization with FieldReferences."""
test_dict = dict(x=1, y=1)
# Reference to a dict.
reference = ml_collections.FieldReference(test_dict)
cfg = ml_collections.ConfigDict()
cfg.reference = reference
self.assertIsInstance(cfg.reference, ml_collections.ConfigDict)
self.assertEqual(test_dict['x'], cfg.reference.x)
self.assertEqual(test_dict['y'], cfg.reference.y)
# Reference to a ConfigDict.
test_configdict = ml_collections.ConfigDict(test_dict)
reference = ml_collections.FieldReference(test_configdict)
cfg = ml_collections.ConfigDict()
cfg.reference = reference
test_configdict.x = 2
self.assertEqual(test_configdict.x, cfg.reference.x)
self.assertEqual(test_configdict.y, cfg.reference.y)
# Reference to a reference.
reference_int = ml_collections.FieldReference(0)
reference = ml_collections.FieldReference(reference_int)
cfg = ml_collections.ConfigDict()
cfg.reference = reference
reference_int.set(1)
self.assertEqual(reference_int.get(), cfg.reference)
def testDeletingFields(self):
    """Tests whether it is possible to delete fields."""
    cfg = ml_collections.ConfigDict()
    cfg.field1 = 123
    cfg.field2 = 123
    self.assertIn('field1', cfg)
    self.assertIn('field2', cfg)
    del cfg.field1
    self.assertNotIn('field1', cfg)
    self.assertIn('field2', cfg)
    del cfg.field2
    self.assertNotIn('field2', cfg)
    # Deleting a name that is not a stored field (here the dict method
    # 'keys') must fail via both attribute and item syntax.
    with self.assertRaises(AttributeError):
        del cfg.keys
    with self.assertRaises(KeyError):
        del cfg['keys']
def testDeletingNestedFields(self):
"""Tests whether it is possible to delete nested fields."""
cfg = ml_collections.ConfigDict({
'a': {
'aa': [1, 2],
},
'b': {
'ba': {
'baa': 2,
'bab': 3,
},
'bb': {1, 2, 3},
},
})
self.assertIn('a', cfg)
self.assertIn('aa', cfg.a)
self.assertIn('baa', cfg.b.ba)
del cfg['a.aa']
self.assertIn('a', cfg)
self.assertNotIn('aa', cfg.a)
del cfg['a']
self.assertNotIn('a', cfg)
del cfg['b.ba.baa']
self.assertIn('ba', cfg.b)
self.assertIn('bab', cfg.b.ba)
self.assertNotIn('baa', cfg.b.ba)
del cfg['b.ba']
self.assertNotIn('ba', cfg.b)
self.assertIn('bb', cfg.b)
with self.assertRaises(AttributeError):
del cfg.keys
with self.assertRaises(KeyError):
del cfg['keys']
def testSetAttr(self):
    """Tests whether it is possible to override an attribute."""
    cfg = ml_collections.ConfigDict()
    # Overwriting special attributes such as __class__ must be rejected.
    with self.assertRaises(AttributeError):
        cfg.__setattr__('__class__', 'abc')
def testPickling(self):
    """Tests whether ConfigDict can be pickled and unpickled."""
    cfg = _get_test_config_dict()
    cfg.lock()
    pickle_cfg = pickle.loads(pickle.dumps(cfg))
    # The round trip must preserve lock state, type and printable content.
    self.assertTrue(pickle_cfg.is_locked)
    self.assertIsInstance(pickle_cfg, ml_collections.ConfigDict)
    self.assertEqual(str(cfg), str(pickle_cfg))
def testPlaceholder(self):
"""Tests whether FieldReference works correctly as a placeholder."""
cfg_element = ml_collections.FieldReference(0)
cfg = ml_collections.ConfigDict({
'element': cfg_element,
'nested': {
'element': cfg_element
}
})
# Type mismatch.
with self.assertRaises(TypeError):
cfg.element = 'string'
cfg.element = 1
self.assertEqual(cfg.element, cfg.nested.element)
def testOptional(self):
"""Tests whether FieldReference works correctly as an optional field."""
# Type mismatch at construction.
with self.assertRaises(TypeError):
ml_collections.FieldReference(0, field_type=str)
# None default and field_type.
with self.assertRaises(ValueError):
ml_collections.FieldReference(None)
cfg = ml_collections.ConfigDict({
'default': ml_collections.FieldReference(0),
})
cfg.default = 1
self.assertEqual(cfg.default, 1)
def testOptionalNoDefault(self):
"""Tests optional field with no default value."""
cfg = ml_collections.ConfigDict({
'nodefault': ml_collections.FieldReference(None, field_type=str),
})
# Type mismatch with field with no default value.
with self.assertRaises(TypeError):
cfg.nodefault = 1
cfg.nodefault = 'string'
self.assertEqual(cfg.nodefault, 'string')
def testGetType(self):
"""Tests whether types are correct for FieldReference fields."""
cfg = ml_collections.ConfigDict()
cfg.integer = 123
cfg.ref = ml_collections.FieldReference(123)
cfg.ref_nodefault = ml_collections.FieldReference(None, field_type=int)
self.assertEqual(cfg.get_type('integer'), int)
self.assertEqual(cfg.get_type('ref'), int)
self.assertEqual(cfg.get_type('ref_nodefault'), int)
# Check errors in case of misspelled key.
with self.assertRaisesRegex(AttributeError, 'Did you.*ref_nodefault.*'):
cfg.get_type('ref_nodefualt')
with self.assertRaisesRegex(AttributeError, 'Did you.*integer.*'):
cfg.get_type('integre')
class ConfigDictUpdateTest(absltest.TestCase):
def testUpdateSimple(self):
"""Tests updating from one ConfigDict to another."""
first = ml_collections.ConfigDict()
first.x = 5
first.y = 'truman'
first.q = 2.0
second = ml_collections.ConfigDict()
second.x = 9
second.y = 'wilson'
second.z = 'washington'
first.update(second)
self.assertEqual(first.x, 9)
self.assertEqual(first.y, 'wilson')
self.assertEqual(first.z, 'washington')
self.assertEqual(first.q, 2.0)
def testUpdateNothing(self):
"""Tests updating a ConfigDict with no arguments."""
cfg = ml_collections.ConfigDict()
cfg.x = 5
cfg.y = 9
cfg.update()
self.assertLen(cfg, 2)
self.assertEqual(cfg.x, 5)
self.assertEqual(cfg.y, 9)
def testUpdateFromDict(self):
"""Tests updating a ConfigDict from a dict."""
cfg = ml_collections.ConfigDict()
cfg.x = 5
cfg.y = 9
cfg.update({'x': 6, 'z': 2})
self.assertEqual(cfg.x, 6)
self.assertEqual(cfg.y, 9)
self.assertEqual(cfg.z, 2)
def testUpdateFromKwargs(self):
"""Tests updating a ConfigDict from kwargs."""
cfg = ml_collections.ConfigDict()
cfg.x = 5
cfg.y = 9
cfg.update(x=6, z=2)
self.assertEqual(cfg.x, 6)
self.assertEqual(cfg.y, 9)
self.assertEqual(cfg.z, 2)
def testUpdateFromDictAndKwargs(self):
"""Tests updating a ConfigDict from a dict and kwargs."""
cfg = ml_collections.ConfigDict()
cfg.x = 5
cfg.y = 9
cfg.update({'x': 4, 'z': 2}, x=6)
self.assertEqual(cfg.x, 6) # kwarg overrides value from dict
self.assertEqual(cfg.y, 9)
self.assertEqual(cfg.z, 2)
def testUpdateFromMultipleDictTypeError(self):
"""Tests that updating a ConfigDict from two dicts raises a TypeError."""
cfg = ml_collections.ConfigDict()
cfg.x = 5
cfg.y = 9
with self.assertRaisesRegex(TypeError,
'update expected at most 1 arguments, got 2'):
cfg.update({'x': 4}, {'z': 2})
def testUpdateNested(self):
"""Tests updating a ConfigDict from a nested dict."""
cfg = ml_collections.ConfigDict()
cfg.subcfg = ml_collections.ConfigDict()
cfg.p = 5
cfg.q = 6
cfg.subcfg.y = 9
cfg.update({'p': 4, 'subcfg': {'y': 10, 'z': 5}})
self.assertEqual(cfg.p, 4)
self.assertEqual(cfg.q, 6)
self.assertEqual(cfg.subcfg.y, 10)
self.assertEqual(cfg.subcfg.z, 5)
def _assert_associated(self, cfg1, cfg2, key):
self.assertEqual(cfg1[key], cfg2[key])
cfg1[key] = 1
cfg2[key] = 2
self.assertEqual(cfg1[key], 2)
cfg1[key] = 3
self.assertEqual(cfg2[key], 3)
def testUpdateFieldReference(self):
"""Tests updating to/from FieldReference fields."""
# Updating FieldReference...
ref = ml_collections.FieldReference(1)
cfg = ml_collections.ConfigDict(dict(a=ref, b=ref))
# from value.
cfg.update(ml_collections.ConfigDict(dict(a=2)))
self.assertEqual(cfg.a, 2)
self.assertEqual(cfg.b, 2)
# from FieldReference.
error_message = 'Cannot update a FieldReference from another FieldReference'
with self.assertRaisesRegex(TypeError, error_message):
cfg.update(
ml_collections.ConfigDict(dict(a=ml_collections.FieldReference(2))))
with self.assertRaisesRegex(TypeError, error_message):
cfg.update(
ml_collections.ConfigDict(dict(b=ml_collections.FieldReference(2))))
# Updating empty ConfigDict with FieldReferences.
ref = ml_collections.FieldReference(1)
cfg_from = ml_collections.ConfigDict(dict(a=ref, b=ref))
cfg = ml_collections.ConfigDict()
cfg.update(cfg_from)
self._assert_associated(cfg, cfg_from, 'a')
self._assert_associated(cfg, cfg_from, 'b')
# Updating values with FieldReferences.
ref = ml_collections.FieldReference(1)
cfg_from = ml_collections.ConfigDict(dict(a=ref, b=ref))
cfg = ml_collections.ConfigDict(dict(a=2, b=3))
cfg.update(cfg_from)
self._assert_associated(cfg, cfg_from, 'a')
self._assert_associated(cfg, cfg_from, 'b')
def testUpdateFromFlattened(self):
cfg = ml_collections.ConfigDict({'a': 1, 'b': {'c': {'d': 2}}})
updates = {'a': 2, 'b.c.d': 3}
cfg.update_from_flattened_dict(updates)
self.assertEqual(cfg.a, 2)
self.assertEqual(cfg.b.c.d, 3)
def testUpdateFromFlattenedWithPrefix(self):
cfg = ml_collections.ConfigDict({'a': 1, 'b': {'c': {'d': 2}}})
updates = {'a': 2, 'b.c.d': 3}
cfg.b.update_from_flattened_dict(updates, 'b.')
self.assertEqual(cfg.a, 1)
self.assertEqual(cfg.b.c.d, 3)
def testUpdateFromFlattenedNotFound(self):
cfg = ml_collections.ConfigDict({'a': 1, 'b': {'c': {'d': 2}}})
updates = {'a': 2, 'b.d.e': 3}
with self.assertRaisesRegex(
KeyError, 'Key "b.d.e" cannot be set as "b.d" was not found.'):
cfg.update_from_flattened_dict(updates)
def testUpdateFromFlattenedWrongType(self):
cfg = ml_collections.ConfigDict({'a': 1, 'b': {'c': {'d': 2}}})
updates = {'a.b.c': 2}
with self.assertRaisesRegex(
KeyError, 'Key "a.b.c" cannot be updated as "a" is not a ConfigDict.'):
cfg.update_from_flattened_dict(updates)
def testUpdateFromFlattenedTupleListConversion(self):
cfg = ml_collections.ConfigDict({
'a': 1,
'b': {
'c': {
'd': (1, 2, 3, 4, 5),
}
}
})
updates = {
'b.c.d': [2, 4, 6, 8],
}
cfg.update_from_flattened_dict(updates)
self.assertIsInstance(cfg.b.c.d, tuple)
self.assertEqual(cfg.b.c.d, (2, 4, 6, 8))
def testDecodeError(self):
# ConfigDict containing two strings with incompatible encodings.
cfg = ml_collections.ConfigDict({
'dill': pickle.dumps(_test_function, protocol=pickle.HIGHEST_PROTOCOL),
'unicode': u'unicode string'
})
expected_error = config_dict.JSONDecodeError if six.PY2 else TypeError
with self.assertRaises(expected_error):
cfg.to_json()
def testConvertDict(self):
"""Test automatic conversion, or not, of dict to ConfigDict."""
cfg = ml_collections.ConfigDict()
cfg.a = dict(b=dict(c=0))
self.assertIsInstance(cfg.a, ml_collections.ConfigDict)
self.assertIsInstance(cfg.a.b, ml_collections.ConfigDict)
cfg = ml_collections.ConfigDict(convert_dict=False)
cfg.a = dict(b=dict(c=0))
self.assertNotIsInstance(cfg.a, ml_collections.ConfigDict)
self.assertIsInstance(cfg.a, dict)
self.assertIsInstance(cfg.a['b'], dict)
def testConvertDictInInitialValue(self):
"""Test automatic conversion, or not, of dict to ConfigDict."""
initial_dict = dict(a=dict(b=dict(c=0)))
cfg = ml_collections.ConfigDict(initial_dict)
self.assertIsInstance(cfg.a, ml_collections.ConfigDict)
self.assertIsInstance(cfg.a.b, ml_collections.ConfigDict)
cfg = ml_collections.ConfigDict(initial_dict, convert_dict=False)
self.assertNotIsInstance(cfg.a, ml_collections.ConfigDict)
self.assertIsInstance(cfg.a, dict)
self.assertIsInstance(cfg.a['b'], dict)
def testConvertDictInCopyAndResolveReferences(self):
"""Test conversion, or not, of dict in copy and resolve references."""
cfg = ml_collections.ConfigDict()
cfg.a = dict(b=dict(c=0))
copied_cfg = cfg.copy_and_resolve_references()
self.assertIsInstance(copied_cfg.a, ml_collections.ConfigDict)
self.assertIsInstance(copied_cfg.a.b, ml_collections.ConfigDict)
cfg = ml_collections.ConfigDict(convert_dict=False)
cfg.a = dict(b=dict(c=0))
copied_cfg = cfg.copy_and_resolve_references()
self.assertNotIsInstance(copied_cfg.a, ml_collections.ConfigDict)
self.assertIsInstance(copied_cfg.a, dict)
self.assertIsInstance(copied_cfg.a['b'], dict)
def testConvertDictTypeCompat(self):
    """Automatic dict conversion must not introduce type-check errors."""
    config = ml_collections.ConfigDict()
    config.a = {}
    self.assertIsInstance(config.a, ml_collections.ConfigDict)
    # Re-assigning a plain dict over the already-converted entry must not
    # raise a type mismatch: dict and ConfigDict are treated as compatible.
    config.a = {}
def testYamlNoConvert(self):
    """YAML round-trip preserves the _convert_dict flag.

    This checks backward compatibility of deserialisation.
    """
    config = ml_collections.ConfigDict(dict(a=1))
    restored = yaml.load(config.to_yaml(), yaml.UnsafeLoader)
    self.assertTrue(restored._convert_dict)
def testRecursiveRename(self):
    """recursive_rename returns a renamed copy and leaves the input intact."""
    original = ml_collections.ConfigDict(_TEST_DICT)
    renamed = config_dict.recursive_rename(original, 'float', 'double')
    # Every 'float' key is renamed to 'double' in the returned config...
    self.assertEqual(renamed.to_dict(), _TEST_DICT_CHANGE_FLOAT_NAME)
    # ...while the original config is left unchanged.
    self.assertEqual(original.to_dict(), _TEST_DICT)
def testGetOnewayRef(self):
    """A one-way ref follows its source, but writes do not propagate back."""
    config = config_dict.create(a=1)
    config.b = config.get_oneway_ref('a')
    # Updating the source is reflected in the reference...
    config.a = 2
    self.assertEqual(2, config.b)
    # ...but overwriting the reference detaches it without touching 'a'.
    config.b = 3
    self.assertEqual(2, config.a)
    self.assertEqual(3, config.b)
class CreateTest(absltest.TestCase):
    """Tests for config_dict.create."""

    def testBasic(self):
        """Keyword arguments become top-level entries."""
        created = config_dict.create(a=1, b='b')
        self.assertEqual(created.to_dict(), {'a': 1, 'b': 'b'})

    def testNested(self):
        """Nested create calls produce nested dictionaries."""
        created = config_dict.create(
            data=config_dict.create(game='freeway'),
            model=config_dict.create(num_hidden=1000))
        expected = {'data': {'game': 'freeway'}, 'model': {'num_hidden': 1000}}
        self.assertEqual(created.to_dict(), expected)
class PlaceholderTest(absltest.TestCase):
    """Tests for config_dict.placeholder and required_placeholder."""

    def testBasic(self):
        # A placeholder starts out as None and can later take a typed value.
        config = config_dict.create(a=1, b=config_dict.placeholder(int))
        self.assertEqual(config.to_dict(), {'a': 1, 'b': None})
        config.b = 5
        self.assertEqual(config.to_dict(), {'a': 1, 'b': 5})

    def testTypeChecking(self):
        # Assigning a value of the wrong type to a placeholder raises.
        config = config_dict.create(a=1, b=config_dict.placeholder(int))
        with self.assertRaises(TypeError):
            config.b = 'chutney'

    def testRequired(self):
        # Reading, exporting, or dereferencing an unset required placeholder
        # raises RequiredValueError.
        config = config_dict.create(a=config_dict.required_placeholder(int))
        ref = config.get_ref('a')
        with self.assertRaises(config_dict.RequiredValueError):
            config.a  # pylint: disable=pointless-statement
        with self.assertRaises(config_dict.RequiredValueError):
            config.to_dict()
        with self.assertRaises(config_dict.RequiredValueError):
            ref.get()
        config.a = 10
        self.assertEqual(config.to_dict(), {'a': 10})
        self.assertEqual(str(config), yaml.dump({'a': 10}))
        # Reset to None and check we still get an error.
        config.a = None
        with self.assertRaises(config_dict.RequiredValueError):
            config.a  # pylint: disable=pointless-statement
        # Set to a different value using the reference obtained calling get_ref().
        ref.set(5)
        self.assertEqual(config.to_dict(), {'a': 5})
        self.assertEqual(str(config), yaml.dump({'a': 5}))
        # dict placeholder.
        test_dict = {'field': 10}
        config = config_dict.create(
            a=config_dict.required_placeholder(dict),
            b=ml_collections.FieldReference(test_dict.copy()))
        # ConfigDict initialization converts dict to ConfigDict.
        self.assertEqual(test_dict, config.b.to_dict())
        config.a = test_dict
        self.assertEqual(test_dict, config.a)
class CycleTest(absltest.TestCase):
    """Tests for reference-cycle detection."""

    def testCycle(self):
        """Self-referencing assignments are rejected as mutability errors."""
        cfg = config_dict.create(a=1)
        cfg.b = cfg.get_ref('a') + cfg.get_ref('a')
        # 'b' depends on 'a' twice; that is a DAG, not a cycle.
        self.assertFalse(cfg.get_ref('b').has_cycle())
        # Making 'a' depend on itself, directly or via 'b', must fail.
        with self.assertRaises(config_dict.MutabilityError):
            cfg.a = cfg.get_ref('a')
        with self.assertRaises(config_dict.MutabilityError):
            cfg.a = cfg.get_ref('b')
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    absltest.main()
| |
"""Support for the Hive climate devices."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
HVAC_MODE_AUTO,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_BOOST,
PRESET_NONE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.helpers import config_validation as cv, entity_platform
from . import HiveEntity, refresh_system
from .const import (
ATTR_TIME_PERIOD,
DOMAIN,
SERVICE_BOOST_HEATING_OFF,
SERVICE_BOOST_HEATING_ON,
)
# Map Hive thermostat modes to Home Assistant HVAC modes, and back.
HIVE_TO_HASS_STATE = {
    "SCHEDULE": HVAC_MODE_AUTO,
    "MANUAL": HVAC_MODE_HEAT,
    "OFF": HVAC_MODE_OFF,
}
HASS_TO_HIVE_STATE = {
    HVAC_MODE_AUTO: "SCHEDULE",
    HVAC_MODE_HEAT: "MANUAL",
    HVAC_MODE_OFF: "OFF",
}
# Hive reports the heating action as a boolean ("is heating"), or the
# string "UNKNOWN" when the state cannot be determined.
HIVE_TO_HASS_HVAC_ACTION = {
    "UNKNOWN": CURRENT_HVAC_OFF,
    False: CURRENT_HVAC_IDLE,
    True: CURRENT_HVAC_HEAT,
}
# Hive unit code ("C"/"F") to Home Assistant temperature unit.
TEMP_UNIT = {"C": TEMP_CELSIUS, "F": TEMP_FAHRENHEIT}
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
SUPPORT_HVAC = [HVAC_MODE_AUTO, HVAC_MODE_HEAT, HVAC_MODE_OFF]
SUPPORT_PRESET = [PRESET_NONE, PRESET_BOOST]
PARALLEL_UPDATES = 0
SCAN_INTERVAL = timedelta(seconds=15)
# Fix: use the module logger, not the root logger returned by
# logging.getLogger(), so log records are attributed to this integration.
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, entry, async_add_entities):
    """Set up Hive thermostat based on a config entry."""
    hive = hass.data[DOMAIN][entry.entry_id]
    devices = hive.session.deviceList.get("climate")
    entities = []
    if devices:
        for dev in devices:
            entities.append(HiveClimateEntity(hive, dev))
    async_add_entities(entities, True)

    platform = entity_platform.current_platform.get()

    # Shared schema for both boost services: a positive time period reduced
    # to whole minutes, plus an optional target temperature.
    # Fix: the default is the float 25.0 rather than the string "25.0"; the
    # string only became a float after validation-time coercion, so a numeric
    # default keeps the schema self-consistent.
    boost_schema = {
        vol.Required(ATTR_TIME_PERIOD): vol.All(
            cv.time_period,
            cv.positive_timedelta,
            lambda td: td.total_seconds() // 60,
        ),
        vol.Optional(ATTR_TEMPERATURE, default=25.0): vol.Coerce(float),
    }

    # Deprecated alias of SERVICE_BOOST_HEATING_ON, kept for compatibility
    # (see the warning emitted by async_heating_boost).
    platform.async_register_entity_service(
        "boost_heating",
        boost_schema,
        "async_heating_boost",
    )
    platform.async_register_entity_service(
        SERVICE_BOOST_HEATING_ON,
        boost_schema,
        "async_heating_boost_on",
    )
    platform.async_register_entity_service(
        SERVICE_BOOST_HEATING_OFF,
        {},
        "async_heating_boost_off",
    )
class HiveClimateEntity(HiveEntity, ClimateEntity):
    """Hive Climate Device."""

    def __init__(self, hive_session, hive_device):
        """Initialize the Climate device."""
        super().__init__(hive_session, hive_device)
        self.thermostat_node_id = hive_device["device_id"]
        # Map the Hive unit code ("C"/"F") to the Home Assistant constant;
        # None if the device reports an unknown unit.
        self.temperature_type = TEMP_UNIT.get(hive_device["temperatureunit"])

    @property
    def unique_id(self):
        """Return unique ID of entity."""
        return self._unique_id

    @property
    def device_info(self):
        """Return device information."""
        return {
            "identifiers": {(DOMAIN, self.device["device_id"])},
            "name": self.device["device_name"],
            "model": self.device["deviceData"]["model"],
            "manufacturer": self.device["deviceData"]["manufacturer"],
            "sw_version": self.device["deviceData"]["version"],
            "via_device": (DOMAIN, self.device["parentDevice"]),
        }

    @property
    def supported_features(self):
        """Return the list of supported features."""
        return SUPPORT_FLAGS

    @property
    def name(self):
        """Return the name of the Climate device."""
        return self.device["haName"]

    @property
    def available(self):
        """Return if the device is available."""
        return self.device["deviceData"]["online"]

    @property
    def hvac_modes(self):
        """Return the list of available hvac operation modes.

        Need to be a subset of HVAC_MODES.
        """
        return SUPPORT_HVAC

    @property
    def hvac_mode(self):
        """Return hvac operation ie. heat, cool mode.

        Need to be one of HVAC_MODE_*.
        """
        return HIVE_TO_HASS_STATE[self.device["status"]["mode"]]

    @property
    def hvac_action(self):
        """Return current HVAC action."""
        return HIVE_TO_HASS_HVAC_ACTION[self.device["status"]["action"]]

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return self.temperature_type

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self.device["status"]["current_temperature"]

    @property
    def target_temperature(self):
        """Return the target temperature."""
        return self.device["status"]["target_temperature"]

    @property
    def min_temp(self):
        """Return minimum temperature."""
        return self.device["min_temp"]

    @property
    def max_temp(self):
        """Return the maximum temperature."""
        return self.device["max_temp"]

    @property
    def preset_mode(self):
        """Return the current preset mode, e.g., home, away, temp."""
        if self.device["status"]["boost"] == "ON":
            return PRESET_BOOST
        # Fix: report PRESET_NONE (a member of preset_modes) rather than
        # None, so the entity always exposes a valid preset.
        return PRESET_NONE

    @property
    def preset_modes(self):
        """Return a list of available preset modes."""
        return SUPPORT_PRESET

    @refresh_system
    async def async_set_hvac_mode(self, hvac_mode):
        """Set new target hvac mode."""
        new_mode = HASS_TO_HIVE_STATE[hvac_mode]
        await self.hive.heating.setMode(self.device, new_mode)

    @refresh_system
    async def async_set_temperature(self, **kwargs):
        """Set new target temperature."""
        new_temperature = kwargs.get(ATTR_TEMPERATURE)
        if new_temperature is not None:
            await self.hive.heating.setTargetTemperature(self.device, new_temperature)

    @refresh_system
    async def async_set_preset_mode(self, preset_mode):
        """Set new preset mode."""
        if preset_mode == PRESET_NONE and self.preset_mode == PRESET_BOOST:
            await self.hive.heating.setBoostOff(self.device)
        elif preset_mode == PRESET_BOOST:
            # Boost for 30 minutes to half a degree above the current
            # temperature, rounded to the nearest 0.5.
            curtemp = round(self.current_temperature * 2) / 2
            temperature = curtemp + 0.5
            await self.hive.heating.setBoostOn(self.device, 30, temperature)

    async def async_heating_boost(self, time_period, temperature):
        """Handle boost heating service call (deprecated alias)."""
        _LOGGER.warning(
            "Hive Service heating_boost will be removed in 2021.7.0, please update to heating_boost_on"
        )
        await self.async_heating_boost_on(time_period, temperature)

    @refresh_system
    async def async_heating_boost_on(self, time_period, temperature):
        """Handle boost heating service call."""
        await self.hive.heating.setBoostOn(self.device, time_period, temperature)

    @refresh_system
    async def async_heating_boost_off(self):
        """Handle boost heating service call."""
        await self.hive.heating.setBoostOff(self.device)

    async def async_update(self):
        """Update all Node data from Hive."""
        await self.hive.session.updateData(self.device)
        self.device = await self.hive.heating.getClimate(self.device)
| |
"""
New, fast version of the CloudPickler.
This new CloudPickler class can now extend the fast C Pickler instead of the
previous Python implementation of the Pickler class. Because this functionality
is only available for Python versions 3.8+, a lot of backward-compatibility
code is also removed.
Note that the C Pickler subclassing API is CPython-specific. Therefore, some
guards present in cloudpickle.py that were written to handle PyPy specificities
are not present in cloudpickle_fast.py
"""
import abc
import copyreg
import io
import itertools
import logging
import sys
import struct
import types
import weakref
import typing
from enum import Enum
from collections import ChainMap
from .compat import pickle, Pickler
from .cloudpickle import (
_extract_code_globals, _BUILTIN_TYPE_NAMES, DEFAULT_PROTOCOL,
_find_imported_submodules, _get_cell_contents, _is_importable,
_builtin_type, _get_or_create_tracker_id, _make_skeleton_class,
_make_skeleton_enum, _extract_class_dict, dynamic_subimport, subimport,
_typevar_reduce, _get_bases, _make_cell, _make_empty_cell, CellType,
_is_parametrized_type_hint, PYPY, cell_set,
parametrized_type_hint_getinitargs, _create_parametrized_type_hint,
builtin_code_type
)
# Expose dump/dumps with an API mirroring the pickle module. Pickle protocol 5
# (Python 3.8+) supports out-of-band buffers, so the buffer_callback argument
# is only accepted when the running interpreter supports it.
if pickle.HIGHEST_PROTOCOL >= 5 and not PYPY:
    # Shorthands similar to pickle.dump/pickle.dumps
    def dump(obj, file, protocol=None, buffer_callback=None):
        """Serialize obj as bytes streamed into file

        protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
        pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
        speed between processes running the same Python version.

        Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
        compatibility with older versions of Python.
        """
        CloudPickler(
            file, protocol=protocol, buffer_callback=buffer_callback
        ).dump(obj)

    def dumps(obj, protocol=None, buffer_callback=None):
        """Serialize obj as a string of bytes allocated in memory

        protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
        pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
        speed between processes running the same Python version.

        Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
        compatibility with older versions of Python.
        """
        with io.BytesIO() as file:
            cp = CloudPickler(
                file, protocol=protocol, buffer_callback=buffer_callback
            )
            cp.dump(obj)
            return file.getvalue()

else:
    # Shorthands similar to pickle.dump/pickle.dumps
    def dump(obj, file, protocol=None):
        """Serialize obj as bytes streamed into file

        protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
        pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
        speed between processes running the same Python version.

        Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
        compatibility with older versions of Python.
        """
        CloudPickler(file, protocol=protocol).dump(obj)

    def dumps(obj, protocol=None):
        """Serialize obj as a string of bytes allocated in memory

        protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
        pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
        speed between processes running the same Python version.

        Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
        compatibility with older versions of Python.
        """
        with io.BytesIO() as file:
            cp = CloudPickler(file, protocol=protocol)
            cp.dump(obj)
            return file.getvalue()

# Unpickling requires no cloudpickle-specific logic: reuse pickle's loaders.
load, loads = pickle.load, pickle.loads
# COLLECTION OF OBJECTS __getnewargs__-LIKE METHODS
# -------------------------------------------------
def _class_getnewargs(obj):
    """Collect the arguments needed to recreate the skeleton of class obj."""
    type_kwargs = {}
    if "__slots__" in obj.__dict__:
        type_kwargs["__slots__"] = obj.__slots__

    # A property stored under the name __dict__ must be recreated explicitly
    # on the skeleton class.
    dict_property = obj.__dict__.get('__dict__', None)
    if isinstance(dict_property, property):
        type_kwargs['__dict__'] = dict_property

    return (type(obj), obj.__name__, _get_bases(obj), type_kwargs,
            _get_or_create_tracker_id(obj), None)
def _enum_getnewargs(obj):
    """Collect the arguments needed to recreate the skeleton of Enum obj."""
    members = {member.name: member.value for member in obj}
    return (obj.__bases__, obj.__name__, obj.__qualname__, members,
            obj.__module__, _get_or_create_tracker_id(obj), None)
# COLLECTION OF OBJECTS RECONSTRUCTORS
# ------------------------------------
def _file_reconstructor(retval):
return retval
# COLLECTION OF OBJECTS STATE GETTERS
# -----------------------------------
def _function_getstate(func):
    """Collect the state of a dynamic function as a (state, slotstate) pair."""
    # - Put func's dynamic attributes (stored in func.__dict__) in state. These
    # attributes will be restored at unpickling time using
    # f.__dict__.update(state)
    # - Put func's members into slotstate. Such attributes will be restored at
    # unpickling time by iterating over slotstate and calling setattr(func,
    # slotname, slotvalue)
    slotstate = {
        "__name__": func.__name__,
        "__qualname__": func.__qualname__,
        "__annotations__": func.__annotations__,
        "__kwdefaults__": func.__kwdefaults__,
        "__defaults__": func.__defaults__,
        "__module__": func.__module__,
        "__doc__": func.__doc__,
        "__closure__": func.__closure__,
    }

    # Only ship the globals the function's code object actually references,
    # not the whole module namespace.
    f_globals_ref = _extract_code_globals(func.__code__)
    f_globals = {k: func.__globals__[k] for k in f_globals_ref if k in
                 func.__globals__}

    closure_values = (
        list(map(_get_cell_contents, func.__closure__))
        if func.__closure__ is not None else ()
    )

    # Extract currently-imported submodules used by func. Storing these modules
    # in a smoke _cloudpickle_subimports attribute of the object's state will
    # trigger the side effect of importing these modules at unpickling time
    # (which is necessary for func to work correctly once depickled)
    slotstate["_cloudpickle_submodules"] = _find_imported_submodules(
        func.__code__, itertools.chain(f_globals.values(), closure_values))
    slotstate["__globals__"] = f_globals

    state = func.__dict__
    return state, slotstate
def _class_getstate(obj):
    """Collect the (clsdict, slotstate) state pair of class obj."""
    clsdict = _extract_class_dict(obj)
    clsdict.pop('__weakref__', None)

    if issubclass(type(obj), abc.ABCMeta):
        # If obj is an instance of an ABCMeta subclass, dont pickle the
        # cache/negative caches populated during isinstance/issubclass
        # checks, but pickle the list of registered subclasses of obj.
        clsdict.pop('_abc_cache', None)
        clsdict.pop('_abc_negative_cache', None)
        clsdict.pop('_abc_negative_cache_version', None)
        registry = clsdict.pop('_abc_registry', None)
        if registry is None:
            # in Python3.7+, the abc caches and registered subclasses of a
            # class are bundled into the single _abc_impl attribute
            clsdict.pop('_abc_impl', None)
            (registry, _, _, _) = abc._get_dump(obj)

            # registry holds weakrefs; dereference them for pickling.
            clsdict["_abc_impl"] = [subclass_weakref()
                                    for subclass_weakref in registry]
        else:
            # In the above if clause, registry is a set of weakrefs -- in
            # this case, registry is a WeakSet
            clsdict["_abc_impl"] = [type_ for type_ in registry]

    if "__slots__" in clsdict:
        # pickle string length optimization: member descriptors of obj are
        # created automatically from obj's __slots__ attribute, no need to
        # save them in obj's state
        if isinstance(obj.__slots__, str):
            clsdict.pop(obj.__slots__)
        else:
            for k in obj.__slots__:
                clsdict.pop(k, None)

    clsdict.pop('__dict__', None)  # unpicklable property object

    return (clsdict, {})
def _enum_getstate(obj):
    """Like _class_getstate, minus the Enum machinery the metaclass rebuilds."""
    clsdict, slotstate = _class_getstate(obj)

    members = {member.name: member.value for member in obj}
    # Cleanup the clsdict that will be passed to _rehydrate_skeleton_class:
    # these attributes are regenerated by the metaclass, so they must not be
    # restored from the pickled state.
    internal_attrs = (
        "_generate_next_value_", "_member_names_", "_member_map_",
        "_member_type_", "_value2member_map_",
    )
    for attrname in internal_attrs:
        clsdict.pop(attrname, None)
    # Members themselves travel via _enum_getnewargs, not the class dict.
    for member_name in members:
        clsdict.pop(member_name)
    # Special handling of Enum subclasses
    return clsdict, slotstate
# COLLECTIONS OF OBJECTS REDUCERS
# -------------------------------
# A reducer is a function taking a single argument (obj), and that returns a
# tuple with all the necessary data to re-construct obj. Apart from a few
# exceptions (list, dict, bytes, int, etc.), a reducer is necessary to
# correctly pickle an object.
# While many built-in objects (Exceptions objects, instances of the "object"
# class, etc), are shipped with their own built-in reducer (invoked using
# obj.__reduce__), some do not. The following methods were created to "fill
# these holes".
def _code_reduce(obj):
"""codeobject reducer"""
if hasattr(obj, "co_posonlyargcount"): # pragma: no branch
args = (
obj.co_argcount, obj.co_posonlyargcount,
obj.co_kwonlyargcount, obj.co_nlocals, obj.co_stacksize,
obj.co_flags, obj.co_code, obj.co_consts, obj.co_names,
obj.co_varnames, obj.co_filename, obj.co_name,
obj.co_firstlineno, obj.co_lnotab, obj.co_freevars,
obj.co_cellvars
)
else:
args = (
obj.co_argcount, obj.co_kwonlyargcount, obj.co_nlocals,
obj.co_stacksize, obj.co_flags, obj.co_code, obj.co_consts,
obj.co_names, obj.co_varnames, obj.co_filename,
obj.co_name, obj.co_firstlineno, obj.co_lnotab,
obj.co_freevars, obj.co_cellvars
)
return types.CodeType, args
def _cell_reduce(obj):
    """Cell (containing values of a function's free variables) reducer"""
    try:
        contents = obj.cell_contents
    except ValueError:
        # Accessing cell_contents of an empty cell raises ValueError.
        return _make_empty_cell, ()
    return _make_cell, (contents,)
def _classmethod_reduce(obj):
orig_func = obj.__func__
return type(obj), (orig_func,)
def _file_reduce(obj):
    """Save a file"""
    import io

    if not hasattr(obj, "name") or not hasattr(obj, "mode"):
        raise pickle.PicklingError(
            "Cannot pickle files that do not map to an actual file"
        )
    # The standard output/error streams are pickled by reference so that they
    # map onto the corresponding streams of the unpickling process.
    if obj is sys.stdout:
        return getattr, (sys, "stdout")
    if obj is sys.stderr:
        return getattr, (sys, "stderr")
    if obj is sys.stdin:
        raise pickle.PicklingError("Cannot pickle standard input")
    if obj.closed:
        raise pickle.PicklingError("Cannot pickle closed files")
    if hasattr(obj, "isatty") and obj.isatty():
        raise pickle.PicklingError(
            "Cannot pickle files that map to tty objects"
        )
    if "r" not in obj.mode and "+" not in obj.mode:
        raise pickle.PicklingError(
            "Cannot pickle files that are not opened for reading: %s"
            % obj.mode
        )

    name = obj.name

    # Snapshot the whole text content into an in-memory StringIO buffer,
    # restoring and preserving the current seek position.
    retval = io.StringIO()

    try:
        # Read the whole file
        curloc = obj.tell()
        obj.seek(0)
        contents = obj.read()
        obj.seek(curloc)
    except IOError as e:
        raise pickle.PicklingError(
            "Cannot pickle file %s as it cannot be read" % name
        ) from e
    retval.write(contents)
    retval.seek(curloc)

    retval.name = name
    return _file_reconstructor, (retval,)
def _getset_descriptor_reduce(obj):
return getattr, (obj.__objclass__, obj.__name__)
def _mappingproxy_reduce(obj):
return types.MappingProxyType, (dict(obj),)
def _memoryview_reduce(obj):
return bytes, (obj.tobytes(),)
def _module_reduce(obj):
    """Module reducer: re-import importable modules, rebuild dynamic ones."""
    if _is_importable(obj):
        return subimport, (obj.__name__,)
    # Dynamic module: ship its namespace, minus __builtins__ which the
    # interpreter repopulates at unpickling time.
    obj.__dict__.pop('__builtins__', None)
    return dynamic_subimport, (obj.__name__, vars(obj))
def _method_reduce(obj):
return (types.MethodType, (obj.__func__, obj.__self__))
def _logger_reduce(obj):
return logging.getLogger, (obj.name,)
def _root_logger_reduce(obj):
return logging.getLogger, ()
def _property_reduce(obj):
return property, (obj.fget, obj.fset, obj.fdel, obj.__doc__)
def _weakset_reduce(obj):
return weakref.WeakSet, (list(obj),)
def _dynamic_class_reduce(obj):
    """
    Save a class that can't be stored as module global.

    This method is used to serialize classes that are defined inside
    functions, or that otherwise can't be serialized as attribute lookups
    from global modules.
    """
    # Enums need dedicated skeleton/state helpers because EnumMeta rebuilds
    # most of the class machinery itself.
    if Enum is not None and issubclass(obj, Enum):
        skeleton_factory = _make_skeleton_enum
        newargs, state = _enum_getnewargs(obj), _enum_getstate(obj)
    else:
        skeleton_factory = _make_skeleton_class
        newargs, state = _class_getnewargs(obj), _class_getstate(obj)
    return (
        skeleton_factory, newargs, state,
        None, None, _class_setstate
    )
def _class_reduce(obj):
"""Select the reducer depending on the dynamic nature of the class obj"""
if obj is type(None): # noqa
return type, (None,)
elif obj is type(Ellipsis):
return type, (Ellipsis,)
elif obj is type(NotImplemented):
return type, (NotImplemented,)
elif obj in _BUILTIN_TYPE_NAMES:
return _builtin_type, (_BUILTIN_TYPE_NAMES[obj],)
elif not _is_importable(obj):
return _dynamic_class_reduce(obj)
return NotImplemented
# COLLECTIONS OF OBJECTS STATE SETTERS
# ------------------------------------
# state setters are called at unpickling time, once the object is created and
# it has to be updated to how it was at unpickling time.
def _function_setstate(obj, state):
    """Update the state of a dynamic function.

    As __closure__ and __globals__ are readonly attributes of a function, we
    cannot rely on the native setstate routine of pickle.load_build, that calls
    setattr on items of the slotstate. Instead, we have to modify them inplace.
    """
    state, slotstate = state
    obj.__dict__.update(state)

    obj_globals = slotstate.pop("__globals__")
    obj_closure = slotstate.pop("__closure__")
    # _cloudpickle_subimports is a set of submodules that must be loaded for
    # the pickled function to work correctly at unpickling time. Now that these
    # submodules are depickled (hence imported), they can be removed from the
    # object's state (the object state only served as a reference holder to
    # these submodules)
    slotstate.pop("_cloudpickle_submodules")

    obj.__globals__.update(obj_globals)
    obj.__globals__["__builtins__"] = __builtins__

    if obj_closure is not None:
        for i, cell in enumerate(obj_closure):
            try:
                value = cell.cell_contents
            except ValueError:  # cell is empty
                continue
            # Closure cells cannot be assigned with setattr; cell_set mutates
            # the already-created cell in place.
            cell_set(obj.__closure__[i], value)

    for k, v in slotstate.items():
        setattr(obj, k, v)
def _class_setstate(obj, state):
state, slotstate = state
registry = None
for attrname, attr in state.items():
if attrname == "_abc_impl":
registry = attr
else:
setattr(obj, attrname, attr)
if registry is not None:
for subclass in registry:
obj.register(subclass)
return obj
class CloudPickler(Pickler):
    """Pickler subclass able to serialize dynamic functions and classes."""

    # set of reducers defined and used by cloudpickle (private)
    _dispatch_table = {}
    _dispatch_table[classmethod] = _classmethod_reduce
    _dispatch_table[io.TextIOWrapper] = _file_reduce
    _dispatch_table[logging.Logger] = _logger_reduce
    _dispatch_table[logging.RootLogger] = _root_logger_reduce
    _dispatch_table[memoryview] = _memoryview_reduce
    _dispatch_table[property] = _property_reduce
    _dispatch_table[staticmethod] = _classmethod_reduce
    _dispatch_table[CellType] = _cell_reduce
    _dispatch_table[types.CodeType] = _code_reduce
    _dispatch_table[types.GetSetDescriptorType] = _getset_descriptor_reduce
    _dispatch_table[types.ModuleType] = _module_reduce
    _dispatch_table[types.MethodType] = _method_reduce
    _dispatch_table[types.MappingProxyType] = _mappingproxy_reduce
    _dispatch_table[weakref.WeakSet] = _weakset_reduce
    _dispatch_table[typing.TypeVar] = _typevar_reduce

    # ChainMap gives cloudpickle's own reducers precedence over copyreg's.
    dispatch_table = ChainMap(_dispatch_table, copyreg.dispatch_table)

    # function reducers are defined as instance methods of CloudPickler
    # objects, as they rely on a CloudPickler attribute (globals_ref)
def _dynamic_function_reduce(self, func):
    """Reduce a function that is not pickleable via attribute lookup."""
    newargs = self._function_getnewargs(func)
    state = _function_getstate(func)
    # 6-tuple reduce value: reconstructor, args, state, listitems,
    # dictitems, and the custom state setter applied at unpickling time.
    return (types.FunctionType, newargs, state, None, None,
            _function_setstate)
def _function_reduce(self, obj):
    """Reducer for function objects.

    If obj is a top-level attribute of a file-backed module, this reducer
    returns NotImplemented, making the CloudPickler fall back to
    traditional _pickle.Pickler routines to save obj. Otherwise, it reduces
    obj using a custom cloudpickle reducer designed specifically to handle
    dynamic functions.

    As opposed to cloudpickle.py, there is no special handling for builtin
    pypy functions because cloudpickle_fast is CPython-specific.
    """
    if _is_importable(obj):
        return NotImplemented
    else:
        return self._dynamic_function_reduce(obj)
def _function_getnewargs(self, func):
    """Build the FunctionType constructor arguments for a dynamic func."""
    code = func.__code__

    # base_globals represents the future global namespace of func at
    # unpickling time. Looking it up and storing it in
    # CloudPickler.globals_ref allow functions sharing the same globals
    # at pickling time to also share them once unpickled, at one condition:
    # since globals_ref is an attribute of a CloudPickler instance, and
    # that a new CloudPickler is created each time pickle.dump or
    # pickle.dumps is called, functions also need to be saved within the
    # same invocation of cloudpickle.dump/cloudpickle.dumps (for example:
    # cloudpickle.dumps([f1, f2])). There is no such limitation when using
    # CloudPickler.dump, as long as the multiple invocations are bound to
    # the same CloudPickler.
    base_globals = self.globals_ref.setdefault(id(func.__globals__), {})

    if base_globals == {}:
        # Add module attributes used to resolve relative imports
        # instructions inside func.
        for k in ["__package__", "__name__", "__path__", "__file__"]:
            if k in func.__globals__:
                base_globals[k] = func.__globals__[k]

    # Do not bind the free variables before the function is created to
    # avoid infinite recursion.
    if func.__closure__ is None:
        closure = None
    else:
        closure = tuple(
            _make_empty_cell() for _ in range(len(code.co_freevars)))

    return code, base_globals, None, None, closure
def dump(self, obj):
    """Pickle obj to the underlying file, translating deep-recursion errors."""
    try:
        return Pickler.dump(self, obj)
    except RuntimeError as exc:
        # Only translate the recursion-limit RuntimeError; everything else
        # propagates unchanged.
        if "recursion" not in exc.args[0]:
            raise
        msg = (
            "Could not pickle object as excessively deep recursion "
            "required."
        )
        raise pickle.PicklingError(msg) from exc
if pickle.HIGHEST_PROTOCOL >= 5:
# `CloudPickler.dispatch` is only left for backward compatibility - note
# that when using protocol 5, `CloudPickler.dispatch` is not an
# extension of `Pickler.dispatch` dictionary, because CloudPickler
# subclasses the C-implemented Pickler, which does not expose a
# `dispatch` attribute. Earlier versions of the protocol 5 CloudPickler
# used `CloudPickler.dispatch` as a class-level attribute storing all
# reducers implemented by cloudpickle, but the attribute name was not a
# great choice given the meaning of `Cloudpickler.dispatch` when
# `CloudPickler` extends the pure-python pickler.
dispatch = dispatch_table
# Implementation of the reducer_override callback, in order to
# efficiently serialize dynamic functions and classes by subclassing
# the C-implemented Pickler.
# TODO: decorrelate reducer_override (which is tied to CPython's
# implementation - would it make sense to backport it to pypy? - and
# pickle's protocol 5 which is implementation agnostic. Currently, the
# availability of both notions coincide on CPython's pickle and the
# pickle5 backport, but it may not be the case anymore when pypy
# implements protocol 5
def __init__(self, file, protocol=None, buffer_callback=None):
    """Create a pickler writing to file, defaulting to DEFAULT_PROTOCOL."""
    if protocol is None:
        protocol = DEFAULT_PROTOCOL
    Pickler.__init__(
        self, file, protocol=protocol, buffer_callback=buffer_callback
    )
    # map functions __globals__ attribute ids, to ensure that functions
    # sharing the same global namespace at pickling time also share
    # their global namespace at unpickling time.
    self.globals_ref = {}
    # The C Pickler does not expose the protocol it was constructed with;
    # mirror it here for cloudpickle's own use.
    self.proto = int(protocol)
def reducer_override(self, obj):
    """Type-agnostic reducing callback for function and classes.

    For performance reasons, subclasses of the C _pickle.Pickler class
    cannot register custom reducers for functions and classes in the
    dispatch_table. Reducers for such types must instead be implemented
    in the special reducer_override method.

    Note that this method will be called for any object except a few
    builtin-types (int, lists, dicts etc.), which differs from reducers
    in the Pickler's dispatch_table, each of them being invoked for
    objects of a specific type only.

    This property comes in handy for classes: although most classes are
    instances of the ``type`` metaclass, some of them can be instances
    of other custom metaclasses (such as enum.EnumMeta for example). In
    particular, the metaclass will likely not be known in advance, and
    thus cannot be special-cased using an entry in the dispatch_table.
    reducer_override, among other things, allows us to register a
    reducer that will be called for any class, independently of its
    type.

    Notes:

    * reducer_override has the priority over dispatch_table-registered
      reducers.
    * reducer_override can be used to fix other limitations of
      cloudpickle for other types that suffered from type-specific
      reducers, such as Exceptions. See
      https://github.com/cloudpipe/cloudpickle/issues/248
    """
    if sys.version_info[:2] < (3, 7) and _is_parametrized_type_hint(obj):  # noqa  # pragma: no branch
        return (
            _create_parametrized_type_hint,
            parametrized_type_hint_getinitargs(obj)
        )
    t = type(obj)
    try:
        is_anyclass = issubclass(t, type)
    except TypeError:  # t is not a class (old Boost; see SF #502085)
        is_anyclass = False

    if is_anyclass:
        return _class_reduce(obj)
    elif isinstance(obj, types.FunctionType):
        return self._function_reduce(obj)
    else:
        # fallback to save_global, including the Pickler's
        # distpatch_table
        return NotImplemented
else:
# When reducer_override is not available, hack the pure-Python
# Pickler's types.FunctionType and type savers. Note: the type saver
# must override Pickler.save_global, because pickle.py contains a
# hard-coded call to save_global when pickling meta-classes.
dispatch = Pickler.dispatch.copy()
def __init__(self, file, protocol=None):
    """Create a pure-Python pickler writing to file."""
    if protocol is None:
        protocol = DEFAULT_PROTOCOL
    Pickler.__init__(self, file, protocol=protocol)
    # map functions __globals__ attribute ids, to ensure that functions
    # sharing the same global namespace at pickling time also share
    # their global namespace at unpickling time.
    self.globals_ref = {}
    # The pure-Python Pickler records the protocol in self.proto itself.
    assert hasattr(self, 'proto')
def _save_reduce_pickle5(self, func, args, state=None, listitems=None,
                         dictitems=None, state_setter=None, obj=None):
    """save_reduce variant backporting the protocol-5 state_setter argument."""
    save = self.save
    write = self.write
    # First emit the reduce value without its state; the state is applied
    # below through an explicit state_setter call instead of BUILD.
    self.save_reduce(
        func, args, state=None, listitems=listitems,
        dictitems=dictitems, obj=obj
    )
    # backport of the Python 3.8 state_setter pickle operations
    save(state_setter)
    save(obj)  # simple BINGET opcode as obj is already memoized.
    save(state)
    write(pickle.TUPLE2)
    # Trigger a state_setter(obj, state) function call.
    write(pickle.REDUCE)
    # The purpose of state_setter is to carry-out an
    # inplace modification of obj. We do not care about what the
    # method might return, so its output is eventually removed from
    # the stack.
    write(pickle.POP)
        def save_global(self, obj, name=None, pack=struct.pack):
            """
            Save a "global".
            The name of this method is somewhat misleading: all types get
            dispatched here.
            """
            # Singleton types are reconstructed by calling type() on their
            # sole instance rather than by import.
            if obj is type(None):  # noqa
                return self.save_reduce(type, (None,), obj=obj)
            elif obj is type(Ellipsis):
                return self.save_reduce(type, (Ellipsis,), obj=obj)
            elif obj is type(NotImplemented):
                return self.save_reduce(type, (NotImplemented,), obj=obj)
            elif obj in _BUILTIN_TYPE_NAMES:
                # Builtin types are looked up by name at unpickling time.
                return self.save_reduce(
                    _builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj)
            if sys.version_info[:2] < (3, 7) and _is_parametrized_type_hint(obj):  # noqa # pragma: no branch
                # Parametrized typing constructs in Python < 3.7 are not
                # compatible with type checks and ``isinstance`` semantics. For
                # this reason, it is easier to detect them using a
                # duck-typing-based check (``_is_parametrized_type_hint``) than
                # to populate the Pickler's dispatch with type-specific savers.
                self.save_reduce(
                    _create_parametrized_type_hint,
                    parametrized_type_hint_getinitargs(obj),
                    obj=obj
                )
            elif name is not None:
                # An explicit name means the caller asserts importability.
                Pickler.save_global(self, obj, name=name)
            elif not _is_importable(obj, name=name):
                # Dynamically-defined class: serialize by value.
                self._save_reduce_pickle5(*_dynamic_class_reduce(obj), obj=obj)
            else:
                # Importable class: serialize by reference.
                Pickler.save_global(self, obj, name=name)
        # Route every class/metaclass through save_global.
        dispatch[type] = save_global
        def save_function(self, obj, name=None):
            """ Registered with the dispatch to handle all function types.
            Determines what kind of function obj is (e.g. lambda, defined at
            interactive prompt, etc) and handles the pickling appropriately.
            """
            if _is_importable(obj, name=name):
                # Importable function: pickle by reference (module + name).
                return Pickler.save_global(self, obj, name=name)
            elif PYPY and isinstance(obj.__code__, builtin_code_type):
                # PyPy models builtins as plain functions with builtin code;
                # see save_pypy_builtin_func below.
                return self.save_pypy_builtin_func(obj)
            else:
                # Dynamic function (lambda, interactively defined, ...):
                # pickle by value, including code and closure state.
                return self._save_reduce_pickle5(
                    *self._dynamic_function_reduce(obj), obj=obj
                )
        def save_pypy_builtin_func(self, obj):
            """Save pypy equivalent of builtin functions.
            PyPy does not have the concept of builtin-functions. Instead,
            builtin-functions are simple function instances, but with a
            builtin-code attribute.
            Most of the time, builtin functions should be pickled by attribute.
            But PyPy has flaky support for __qualname__, so some builtin
            functions such as float.__new__ will be classified as dynamic. For
            this reason only, we created this special routine. Because
            builtin-functions are not expected to have closure or globals,
            there is no additional hack (compared the one already implemented
            in pickle) to protect ourselves from reference cycles. A simple
            (reconstructor, newargs, obj.__dict__) tuple is save_reduced. Note
            also that PyPy improved their support for __qualname__ in v3.6, so
            this routing should be removed when cloudpickle supports only PyPy
            3.6 and later.
            """
            # Rebuild the function from its code object with empty globals;
            # obj.__dict__ is restored as the reduce state.
            rv = (types.FunctionType, (obj.__code__, {}, obj.__name__,
                                       obj.__defaults__, obj.__closure__),
                  obj.__dict__)
            self.save_reduce(*rv, obj=obj)
        # Route every plain function through save_function.
        dispatch[types.FunctionType] = save_function
| |
# -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes and CONTRIBUTORS
Email: danaukes<at>asu.edu.
Please see LICENSE for full license.
"""
import qt.QtCore as qc
import qt.QtGui as qg
import popupcad
from popupcad.filetypes.operation2 import Operation2
from popupcad.widgets.table_editor_popup import Table, SingleItemListElement,IntegerElement,Row, TableControl, DraggableTreeElement,Delegate
from popupcad.widgets.listmanager import DesignListManager
class SketchData(object):
    """Pair of sketch references: the sketch to replace and its replacement."""

    def __init__(self, ref1, ref2):
        self.ref1, self.ref2 = ref1, ref2
class InputData(object):
    """Operation-reference mapping (subdesign ref -> parent ref) plus a layer shift."""

    def __init__(self, ref1, ref2, shift):
        self.ref1, self.ref2 = ref1, ref2
        self.shift = shift
class OutputData(object):
    """Subdesign operation reference to re-export, plus a layer shift."""

    def __init__(self, ref1, shift):
        self.ref1, self.shift = ref1, shift
class SketchRow(Row):
    """Table row pairing a subdesign sketch with its replacement sketch."""

    def __init__(self, get_sketches, get_layers):
        self.elements = [
            SingleItemListElement('to replace', get_sketches),
            SingleItemListElement('replace with', get_layers),
        ]
class InputRow(Row):
    """Table row mapping a subdesign operation output to a parent operation output, with a shift."""

    def __init__(self, get_sketches, get_layers):
        self.elements = [
            DraggableTreeElement('to replace', get_sketches),
            DraggableTreeElement('replace with', get_layers),
            IntegerElement('shift'),
        ]
class OutputRow(Row):
    """Table row selecting a subdesign operation output to export, with a shift."""

    def __init__(self, get_sketches):
        self.elements = [
            DraggableTreeElement('output', get_sketches),
            IntegerElement('shift'),
        ]
class MainWidget(qg.QDialog):
    """Dialog for configuring a SubOperation2.

    Lets the user pick a subdesign and map its sketches, inputs and
    outputs onto the parent design via three editable tables.
    """
    def __init__(self, design, sketches, layers, operations, jointop=None):
        """Build the dialog.

        When *jointop* (an existing SubOperation2) is given, pre-populate
        the selection and the three tables from its stored lists.
        """
        super(MainWidget, self).__init__()
        self.design = design
        self.sketches = sketches
        self.layers = layers
        self.operations = operations
        self.designwidget = DesignListManager(design)
        # Tables are parameterized with getters so their contents track
        # the currently selected subdesign.
        self.input_table = Table(InputRow(self.get_subdesign_operations,self.get_operations),Delegate)
        self.sketch_table = Table(SketchRow(self.get_subdesign_sketches,self.get_sketches),Delegate)
        self.output_table = Table(OutputRow(self.get_subdesign_operations),Delegate)
        self.sketch_control = TableControl(self.sketch_table, self)
        self.input_control = TableControl(self.input_table, self)
        self.output_control = TableControl(self.output_table, self)
        button_ok = qg.QPushButton('Ok')
        button_cancel = qg.QPushButton('Cancel')
        button_ok.clicked.connect(self.accept)
        button_cancel.clicked.connect(self.reject)
        sublayout2 = qg.QHBoxLayout()
        sublayout2.addWidget(button_ok)
        sublayout2.addWidget(button_cancel)
        layout = qg.QVBoxLayout()
        layout.addWidget(self.designwidget)
        layout.addWidget(self.sketch_control)
        layout.addWidget(self.input_control)
        layout.addWidget(self.output_control)
        layout.addLayout(sublayout2)
        self.setLayout(layout)
        if jointop is not None:
            # Editing an existing operation: restore its configuration.
            subdesign = design.subdesigns[jointop.design_links['source'][0]]
            for ii in range(self.designwidget.itemlist.count()):
                item = self.designwidget.itemlist.item(ii)
                if item.value == subdesign:
                    item.setSelected(True)
            for item in jointop.sketch_list:
                self.sketch_table.row_add([subdesign.sketches[item.ref1]], [design.sketches[item.ref2]])
            for item in jointop.input_list:
                # Stored refs are (operation_id, output_index); tables work
                # with (operation_index, output_index).
                index1 = self.subdesign().operation_index(item.ref1[0])
                output1 = item.ref1[1]
                index2 = self.design.operation_index(item.ref2[0])
                output2 = item.ref2[1]
                self.input_table.row_add([(index1, output1)], [(index2, output2)],item.shift)
            for item in jointop.output_list:
                index1 = self.subdesign().operation_index(item.ref1[0])
                output1 = item.ref1[1]
                self.output_table.row_add([(index1, output1)],item.shift)
        # Changing the selected subdesign invalidates all three tables.
        self.designwidget.itemlist.itemSelectionChanged.connect(
            self.input_table.reset)
        self.designwidget.itemlist.itemSelectionChanged.connect(
            self.output_table.reset)
        self.designwidget.itemlist.itemSelectionChanged.connect(
            self.sketch_table.reset)
    def get_operations(self):
        """Operations of the parent design usable as input sources."""
        return self.operations
    def get_subdesign_operations(self):
        """Operations of the currently selected subdesign."""
        return self.subdesign().operations
    def get_sketches(self):
        """Sketches of the parent design."""
        return self.design.sketches.values()
    def get_subdesign_sketches(self):
        """Sketches of the currently selected subdesign."""
        return self.subdesign().sketches.values()
    def subdesign(self):
        """Return the selected subdesign, or None if nothing is selected."""
        try:
            return self.designwidget.itemlist.selectedItems()[0].value
        except IndexError:
            return None
    def acceptdata(self):
        """Collect dialog state as (design_links, sketch_list, input_list, output_list).

        Table rows hold live objects / indices; this converts them back to
        stable ids suitable for storing on the operation.
        """
        sketch_list = []
        data = self.sketch_table.export_data()
        for sketch1, sketch2 in data:
            sketch_list.append(SketchData(sketch1[0].id, sketch2[0].id))
        input_list = []
        for refs_from, refs_to,shift in self.input_table.export_data():
            op1_index, op1_output = refs_from[0]
            op2_index, op2_output = refs_to[0]
            op1_ref = self.subdesign().operations[op1_index].id
            op2_ref = self.design.operations[op2_index].id
            input_list.append(InputData((op1_ref, op1_output), (op2_ref, op2_output),shift))
        output_list = []
        for ref1,shift in self.output_table.export_data():
            op1_index, op1_output = ref1[0]
            op1_ref = self.subdesign().operations[op1_index].id
            output_list.append(OutputData((op1_ref, op1_output),shift))
        design_links = {}
        design_links['source'] = [self.subdesign().id]
        return design_links, sketch_list, input_list, output_list
class SubOperation2(Operation2):
    """Operation embedding a subdesign into the parent design.

    Selected parent sketches/operation outputs are substituted into a copy
    of the subdesign, which is then reprocessed; chosen subdesign outputs
    are shifted/re-layered and exposed as this operation's outputs.
    """
    name = 'SubOp'

    def copy(self):
        """Return a copy with fresh list objects, preserving id and custom name."""
        new = type(self)(
            self.design_links.copy(),
            self.sketch_list[:],
            self.input_list[:],
            self.output_list[:])
        new.id = self.id
        new.customname = self.customname
        return new

    def upgrade(self):
        """Convert a legacy scalar 'source' design link into the list form.

        Returns a new, upgraded operation when conversion is needed,
        otherwise returns self unchanged.
        """
        if isinstance(self.design_links['source'], int):
            design_links = {'source': [self.design_links['source']]}
            new = type(self)(
                design_links,
                self.sketch_list[:],
                self.input_list[:],
                self.output_list[:])
            new.id = self.id
            new.customname = self.customname
            return new
        else:
            return self

    def __init__(self, *args):
        super(SubOperation2, self).__init__()
        self.editdata(*args)
        self.id = id(self)

    def editdata(self, design_links, sketch_list, input_list, output_list):
        """Store the configuration produced by MainWidget.acceptdata()."""
        super(SubOperation2, self).editdata({}, {}, design_links)
        self.sketch_list = sketch_list
        self.design_links = design_links
        self.input_list = input_list
        self.output_list = output_list

    def replace_op_refs(self, refold, refnew):
        """Replace whole (operation_id, output_index) input references."""
        for item in self.input_list:
            if item.ref2 == refold:
                item.ref2 = refnew
        self.clear_output()

    def replace_op_refs2(self, refold, refnew):
        """Replace only the operation-id half of input references."""
        for item in self.input_list:
            if item.ref2[0] == refold:
                item.ref2 = (refnew, item.ref2[1])
        self.clear_output()

    def parentrefs(self):
        """Ids of the parent-design operations this operation reads from.

        NOTE: the original class defined parentrefs twice with identical
        behavior (a comprehension and a loop); the duplicate was removed.
        """
        return [item.ref2[0] for item in self.input_list]

    @classmethod
    def buildnewdialog(cls, design, currentop):
        """Return a configuration dialog for creating a new operation."""
        dialog = MainWidget(
            design,
            design.sketches.values(),
            design.return_layer_definition().layers,
            design.operations)
        return dialog

    def buildeditdialog(self, design):
        """Return a configuration dialog pre-filled from this operation.

        Only operations prior to this one are offered as inputs, to avoid
        dependency cycles.
        """
        dialog = MainWidget(
            design,
            design.sketches.values(),
            design.return_layer_definition().layers,
            design.prioroperations(self),
            self)
        return dialog

    def generate(self, design):
        """Compute self.output by executing a specialized subdesign copy."""
        from popupcad.manufacturing.dummy_operation1 import DummyOp1
        subdesign_orig = design.subdesigns[self.design_links['source'][0]]
        subdesign = subdesign_orig.copy_yaml()
        # Copy the parent's sketches so substitutions cannot mutate them.
        sketches = design.sketches.copy()
        for key, value in sketches.items():
            sketches[key] = value.copy()
        subdesign.sketches.update(sketches)
        layerdef_subdesign = subdesign.return_layer_definition()
        layerdef_design = design.return_layer_definition()
        for sketch_data in self.sketch_list:
            from_ref = sketch_data.ref1
            to_ref = sketch_data.ref2
            subdesign.replace_sketch_refs_force(from_ref, to_ref)
        for input_data in self.input_list:
            from_ref = input_data.ref1
            to_ref = input_data.ref2
            # Feed parent geometry into the subdesign through a dummy
            # operation inserted at the front of its pipeline.
            csg = design.op_from_ref(to_ref[0]).output[to_ref[1]].csg
            csg2 = popupcad.algorithms.manufacturing_functions.shift_flip_rotate(csg, input_data.shift, False, False)
            csg3 = csg2.switch_layer_defs(layerdef_subdesign)
            dummy_op = DummyOp1(csg3)
            to_ref2 = (dummy_op.id, 0)
            subdesign.insert_operation(0, dummy_op)
            subdesign.replace_op_refs_force(from_ref, to_ref2)
        subdesign.reprocessoperations()
        self.output = []
        for output_data in self.output_list:
            new_output = subdesign.op_from_ref(output_data.ref1[0]).output[output_data.ref1[1]]
            csg = new_output.csg
            csg2 = csg.switch_layer_defs(layerdef_design)
            csg3 = popupcad.algorithms.manufacturing_functions.shift_flip_rotate(csg2, output_data.shift, False, False)
            output2 = popupcad.filetypes.operationoutput.OperationOutput(csg3, new_output.name)
            self.output.append(output2)

    def sketchrefs(self):
        """Ids of the parent-design sketches used as replacements."""
        a = []
        for item in self.sketch_list:
            a.append(item.ref2)
        return a

    def replace_sketch_refs(self, refold, refnew):
        """Replace replacement-sketch references."""
        for item in self.sketch_list:
            if item.ref2 == refold:
                item.ref2 = refnew
        self.clear_output()
| |
"""
Copyright 2015, Cisco Systems, Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: Pravin Gohite, Cisco Systems, Inc.
"""
import os
import logging
import lxml.etree as ET
class Cxml:
    """In-memory model of a cxml file (XML-encoded YANG schema).

    Provides the "lazy" tree queries used by the yang-explorer client:
    each query returns only the requested node(s), with placeholder
    children inserted for on-demand expansion in the client tree.
    """

    def __init__(self, filename):
        """Parse *filename*; on any failure self.cxml is left as None."""
        self.filename = filename
        self.modulename = os.path.splitext(os.path.basename(filename))[0]
        if os.path.exists(filename):
            logging.debug('Parsing %s' % filename)
            try:
                self.cxml = ET.parse(filename)
            except Exception:
                # FIX: was a bare "except:"; still tolerate any parse
                # failure but no longer swallow SystemExit/KeyboardInterrupt.
                self.cxml = None
                logging.error('ET Failed to parse %s' % filename)
        else:
            self.cxml = None
            logging.error('File %s does not exists' % filename)

    def getroot(self):
        """Return the root element of the parsed cxml tree."""
        return self.cxml.getroot()

    def toxpath(self, path):
        """Translate a '/'-separated node path into an ElementTree xpath."""
        if path:
            path_elems = path.split('/')
            xpath = '[@name="%s"]' % path_elems[0]
            for elem in path_elems[1:]:
                xpath += '/node[@name="%s"]' % elem
        else:
            xpath = '/'
        return xpath

    def toxpath_filter(self, path, prefix):
        """
        Returns an xpath filter that is used in RPCs.
        For example: module/node1/node2 -> /module_prefix:node1/node2
        YANG-Push subscribers use it to specify
        data of interest in establish-subscription RPCs.
        """
        if path and prefix:
            path_elems = path.split('/')
            xpath = path.replace(path_elems[0] + '/',
                                 '/' + prefix + ':', 1)
        else:
            xpath = ''
        return xpath

    def get_lazy_node_internal(self, cxml_element, base='', module_prefix=''):
        """Build one client-facing <node> element from *cxml_element*.

        Non-terminal nodes get a '__yang_placeholder' child so the client
        tree can expand them on demand.
        """
        node = ET.Element('node')
        add_placeholder = True
        # Add attributes from cxml node
        for attrib in cxml_element.attrib:
            node.set(attrib, cxml_element.attrib[attrib])
            # Terminal (leaf-like) nodes never need a placeholder child.
            if (attrib == 'type' and cxml_element.attrib[attrib] in \
                ['leaf', 'leafref', 'leaf-list']):
                add_placeholder = False
        try:
            desc = cxml_element.find('description')
            if desc is not None:
                node.set('description', desc.text.strip())
        except Exception:
            # FIX: was a bare "except:"; the description is optional, so
            # missing/empty text is simply ignored.
            pass
        if base == '':
            node.set('path', self.modulename)
        else:
            base += '/'
            node.set('path', base + cxml_element.get('name'))
        if base != '' and module_prefix != '':
            xpath_filter = self.toxpath_filter(base + cxml_element.get('name'),
                                               module_prefix)
            node.set('xpath_filter', xpath_filter)
        if add_placeholder:
            pnode = ET.Element('node')
            pnode.set('name', 'Loading ..')
            pnode.set('type', '__yang_placeholder')
            node.append(pnode)
        return node

    def get_lazy_node(self, path='', add_ns=True):
        """
        Returns yang explorer compatible lazy node xml. A lazy
        node only returns a cxml node which is requested. All
        other node along the path returned as _placeholder_
        nodes for on-demand loading in client tree.
        """
        logging.debug('get_lazy_node: ' + path)
        root = ET.Element('root')
        if self.cxml is None:
            return root
        cxml_root = self.getroot()
        if path == '':
            # Module-level query: return the root node plus its namespaces.
            node = self.get_lazy_node_internal(cxml_root)
            nslist = [c.get('prefix') + ',' + c.text for c in cxml_root if c.tag == 'namespace']
            node.set('namespaces', '|'.join(nslist))
            node.set('name', self.modulename)
            root.append(node)
            return root
        module_prefix = cxml_root.get('prefix', '')
        # move root node to requested node
        elements = path.split('/')
        for name in elements[1:]:
            for child in cxml_root:
                if child.get('name', '') == name:
                    cxml_root = child
                    break
        for child in cxml_root:
            if child.tag == 'node':
                node = self.get_lazy_node_internal(child, path, module_prefix)
                root.append(node)
            if child.tag == 'namespace' and add_ns:
                if cxml_root.get('prefix', '') == child.get('prefix'):
                    child.set('default', 'true')
                root.append(child)
        return root

    def get_lazy_tree_one(self, path, value):
        """
        Returns yang explorer compatible lazy tree xml. A lazy
        tree returns a cxml nested tree from root to requested
        node.
        Other node along the path returned as _placeholder_
        nodes for on-demand loading in client tree.
        """
        tree = None
        path_elems = path.split('/')
        subpath = xpath = ''
        for elems in path_elems:
            nodes = self.get_lazy_node(subpath)
            if tree is None:
                tree = nodes.find('node')
                xpath = '[@name="%s"]' % elems
                logging.info(ET.tostring(tree))
            else:
                subpath += '/'
                temp = tree.find(xpath)
                if temp is not None:
                    # Replace the placeholder child with the real children.
                    tree.find(xpath).remove(tree.find(xpath)[0])
                    for child in nodes:
                        if child.get('path') == path:
                            child.set('value', value)
                        tree.find(xpath).append(child)
                xpath += '/node[@name="%s"]' % elems
            subpath += elems
        return tree

    def get_lazy_tree(self, pathvalues):
        """
        Returns yang explorer compatible lazy tree xml. A lazy
        tree returns a cxml nested tree from root to requested
        node.
        Other node along the path returned as _placeholder_
        nodes for on-demand loading in client tree.
        """
        logging.debug('get_lazy_tree: Building lazy tree..')
        plist = []
        vdict = {}
        for (path, value) in pathvalues:
            plist.append(path.split('/'))
            vdict[path] = value
        level = 0
        logging.info(str(plist))
        tree = self.get_lazy_node()
        tree = tree[0]
        # Breadth-first expansion: at each level, expand every distinct
        # path prefix that appears in the requested paths.
        while True:
            pending = []
            for path_elems in plist:
                if level >= len(path_elems):
                    continue
                cxpath = '/'.join(path_elems[:level + 1])
                if cxpath not in pending:
                    pending.append(cxpath)
            if len(pending) == 0:
                break
            for cxpath in pending:
                subtree = self.get_lazy_node(cxpath, False)
                xpath = self.toxpath(cxpath)
                if len(subtree) == 0:
                    continue
                tree.find(xpath).remove(tree.find(xpath)[0])
                for child in subtree:
                    cpath = child.get('path', '')
                    # FIX: the default used to be '' which made the
                    # "is not None" guard below unreachable; use None so
                    # nodes without requested attributes are skipped.
                    values = vdict.get(cpath)
                    if values is not None:
                        for key in values:
                            child.set(key, values[key])
                    tree.find(xpath).append(child)
            level += 1
        # end while
        return tree

    def get_lazy_subtree(self, base, path):
        """
        Returns yang explorer compatible lazy subtree xml. A lazy
        tree returns a cxml nested tree from base to requested
        node.
        Other node along the path returned as _placeholder_
        nodes for on-demand loading in client tree.
        """
        tree = self.get_lazy_node(base)
        if not path:
            return tree
        path_elems = path.split('/')
        xpath = ''
        subpath = base
        for elems in path_elems[1:]:
            subpath += '/' + elems
            logging.info('Query: ' + subpath)
            nodes = self.get_lazy_node(subpath)
            if not xpath:
                xpath = 'node[@name="%s"]' % elems
            else:
                xpath += '/node[@name="%s"]' % elems
            temp = tree.find(xpath)
            if temp is not None and nodes:
                tree.find(xpath).remove(tree.find(xpath)[0])
                for child in nodes:
                    tree.find(xpath).append(child)
            else:
                logging.error('Error: %s not found' % xpath)
                break
        return tree

    def get_namespaces(self):
        """Return (prefix, module, uri) tuples for every declared namespace."""
        if self.cxml is None:
            return []
        return [(ns.get('prefix', ''), ns.get('module', ''), ns.text)
                for ns in self.cxml.getroot() if ns.tag == 'namespace']
class CxmlIterator(object):
    """ XPath Iterator for Cxml
    @params filename:string - cxml file path
    @params include-keys: bool - include keys in xpath
    @params include-prefixes:list - list of included namespaces/prefixes
    @params include-default:bool - include xpath with root-prefix
    @params add-root-prefix:bool - add root-prefix in xpath
    """
    def __init__(self, filename, cxml=None, options=None):
        # FIX: "options" used to have a mutable default ({}); None keeps
        # the same behavior without sharing a dict across calls.
        if options is None:
            options = {}
        if cxml:
            self.handle = cxml
        else:
            self.handle = ET.parse(filename)
        self.inc_keys = options.get('include-keys', False)
        self.inc_prefixes = options.get('include-prefixes', [])
        self.inc_default = options.get('include-default', False)
        self.add_root_prefix = options.get('add-root-prefix', False)
        self.current = self.handle.getroot()
        self.prefix = self.current.get('prefix', None)
        self.path = [self.current.get('name')]

    def __iter__(self):
        return self

    def reset(self):
        """Rewind the iterator to the document root."""
        self.current = self.handle.getroot()
        self.prefix = self.current.get('prefix', None)
        self.path = [self.current.get('name')]

    def _get_next_parent(self):
        """Climb ancestors until an un-filtered next sibling (uncle) is found.

        Pops self.path once per exhausted ancestor level; returns None when
        the whole tree has been traversed.
        """
        _parent = self.current.getparent()
        while _parent is not None:
            uncle = _parent.getnext()
            if uncle is None:
                _parent = _parent.getparent()
                self.path.pop()
                continue
            if self._filter(uncle):
                _parent = _parent.getparent()
                continue
            return uncle
        return _parent

    def _set_xpath(self):
        """Append self.current's path component (keys/prefix as configured)."""
        _name = self.current.get('name', None)
        # add keys in xpath if required
        if self.inc_keys and self.current.get('type', '') == 'list':
            _keys = self.current.get('key', '')
            _name += '[' + _keys + ']'
        # add default prefix in xpath if required
        if self.add_root_prefix and ':' not in _name:
            _name = self.prefix + ':' + _name
        # append to xpath list
        self.path.append(_name)

    def _get_prefix(self, node):
        """Return the namespace prefix of *node*'s name, or None."""
        name = node.get('name', None)
        return name.split(':')[0] if ':' in name else None

    def _filter(self, node):
        """ Filter xpath """
        # Returns True when *node* should be skipped because its prefix is
        # not in the include list.
        if not self.inc_prefixes:
            return False
        pfx = self._get_prefix(node)
        if pfx is not None:
            return pfx not in self.inc_prefixes
        return False

    def next(self):
        # Depth First Traversal
        # Look for children first
        if len(self.current):
            for child in self.current.findall('node'):
                if self._filter(child):
                    continue
                self.current = child
                self._set_xpath()
                if self.has_prefix():
                    return '/'.join(self.path), self.current
                return self.next()
        # Look for siblings next
        _next = self.current.getnext()
        self.path.pop()
        while _next is not None:
            if self._filter(_next):
                _next = _next.getnext()
                continue
            self.current = _next
            self._set_xpath()
            if self.has_prefix():
                return '/'.join(self.path), self.current
            return self.next()
        # Look for parent last
        _parent = self._get_next_parent()
        if _parent is None:
            raise StopIteration()
        self.path.pop()
        if not self._filter(_parent):
            self.current = _parent
            self._set_xpath()
            if self.has_prefix():
                return '/'.join(self.path), self.current
        return self.next()

    # FIX: Python 3 iterator protocol calls __next__, not next(); alias so
    # the iterator works under both Python 2 and 3 (PEP 3114).
    __next__ = next

    def has_prefix(self):
        """True when the current xpath passes the prefix include filters."""
        if not self.inc_prefixes:
            return True
        if self.inc_default:
            if self.add_root_prefix:
                if not any(not elem.startswith(self.prefix + ':') for elem in self.path[1:]):
                    return True
            else:
                if not any(':' in elem for elem in self.path[1:]):
                    return True
        for i_pfx in self.inc_prefixes:
            if any(elem.startswith(i_pfx + ':') for elem in self.path):
                return True
        return False
def get_cxml(filename):
    """ Create and return CXML object from File or LocalCache """
    return Cxml(filename)
| |
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import sys
import traceback
from oslo.config import cfg
from akanda.rug.openstack.common.gettextutils import _
from akanda.rug.openstack.common import importutils
from akanda.rug.openstack.common import jsonutils
from akanda.rug.openstack.common import local
from akanda.rug.openstack.common import log as logging
# Global oslo config object and module-level logger used throughout
# this rpc common module.
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
'''RPC Envelope Version.
This version number applies to the top level structure of messages sent out.
It does *not* apply to the message payload, which must be versioned
independently. For example, when using rpc APIs, a version number is applied
for changes to the API being exposed over rpc. This version number is handled
in the rpc proxy and dispatcher modules.
This version number applies to the message envelope that is used in the
serialization done inside the rpc layer. See serialize_msg() and
deserialize_msg().
The current message format (version 2.0) is very simple. It is:
{
'oslo.version': <RPC Envelope Version as a String>,
'oslo.message': <Application Message Payload, JSON encoded>
}
Message format version '1.0' is just considered to be the messages we sent
without a message envelope.
So, the current message envelope just includes the envelope version. It may
eventually contain additional information, such as a signature for the message
payload.
We will JSON encode the application message payload. The message envelope,
which includes the JSON encoded application message body, will be passed down
to the messaging libraries as a dict.
'''
# Current envelope version and the dict keys used when wrapping a
# message payload in the rpc envelope (see the module docstring above).
_RPC_ENVELOPE_VERSION = '2.0'
_VERSION_KEY = 'oslo.version'
_MESSAGE_KEY = 'oslo.message'
# TODO(russellb) Turn this on after Grizzly.
_SEND_RPC_ENVELOPE = False
class RPCException(Exception):
    """Base class for all rpc-related exceptions in this module.

    Subclasses set a translated ``message`` format string; keyword
    arguments passed to the constructor are interpolated into it.
    """
    message = _("An unknown RPC related exception occurred.")
    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs
        if not message:
            try:
                message = self.message % kwargs
            except Exception:
                # kwargs doesn't match a variable in the message
                # log the issue and the kwargs
                LOG.exception(_('Exception in string format operation'))
                for name, value in kwargs.iteritems():
                    LOG.error("%s: %s" % (name, value))
                # at least get the core message out if something happened
                message = self.message
        super(RPCException, self).__init__(message)
class RemoteError(RPCException):
    """Signifies that a remote class has raised an exception.
    Contains a string representation of the type of the original exception,
    the value of the original exception, and the traceback.  These are
    sent to the parent as a joined string so printing the exception
    contains all of the relevant info.
    """
    message = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")
    def __init__(self, exc_type=None, value=None, traceback=None):
        # Keep the remote failure's pieces accessible individually as well
        # as formatted into the message.
        self.exc_type = exc_type
        self.value = value
        self.traceback = traceback
        super(RemoteError, self).__init__(exc_type=exc_type,
                                          value=value,
                                          traceback=traceback)
class Timeout(RPCException):
    """Signifies that a timeout has occurred.
    This exception is raised if the rpc_response_timeout is reached while
    waiting for a response from the remote side.
    """
    message = _("Timeout while waiting on RPC response.")
class DuplicateMessageError(RPCException):
    """Raised when a message with an already-seen msg_id is received."""
    message = _("Found duplicate message(%(msg_id)s). Skipping it.")
class InvalidRPCConnectionReuse(RPCException):
    """Raised when an rpc connection is reused incorrectly."""
    message = _("Invalid reuse of an RPC connection.")
class UnsupportedRpcVersion(RPCException):
    """Raised when the requested rpc API version is not supported."""
    message = _("Specified RPC version, %(version)s, not supported by "
                "this endpoint.")
class UnsupportedRpcEnvelopeVersion(RPCException):
    """Raised when the message envelope version is not supported."""
    message = _("Specified RPC envelope version, %(version)s, "
                "not supported by this endpoint.")
class Connection(object):
    """A connection, returned by rpc.create_connection().
    This class represents a connection to the message bus used for rpc.
    An instance of this class should never be created by users of the rpc API.
    Use rpc.create_connection() instead.

    This is an abstract interface: every method raises NotImplementedError
    and is implemented by the concrete rpc driver.
    """
    def close(self):
        """Close the connection.
        This method must be called when the connection will no longer be used.
        It will ensure that any resources associated with the connection, such
        as a network connection, and cleaned up.
        """
        raise NotImplementedError()
    def create_consumer(self, topic, proxy, fanout=False):
        """Create a consumer on this connection.
        A consumer is associated with a message queue on the backend message
        bus.  The consumer will read messages from the queue, unpack them, and
        dispatch them to the proxy object.  The contents of the message pulled
        off of the queue will determine which method gets called on the proxy
        object.
        :param topic: This is a name associated with what to consume from.
                      Multiple instances of a service may consume from the same
                      topic. For example, all instances of nova-compute consume
                      from a queue called "compute".  In that case, the
                      messages will get distributed amongst the consumers in a
                      round-robin fashion if fanout=False.  If fanout=True,
                      every consumer associated with this topic will get a
                      copy of every message.
        :param proxy: The object that will handle all incoming messages.
        :param fanout: Whether or not this is a fanout topic.  See the
                       documentation for the topic parameter for some
                       additional comments on this.
        """
        raise NotImplementedError()
    def create_worker(self, topic, proxy, pool_name):
        """Create a worker on this connection.
        A worker is like a regular consumer of messages directed to a
        topic, except that it is part of a set of such consumers (the
        "pool") which may run in parallel.  Every pool of workers will
        receive a given message, but only one worker in the pool will
        be asked to process it.  Load is distributed across the members
        of the pool in round-robin fashion.
        :param topic: This is a name associated with what to consume from.
                      Multiple instances of a service may consume from the same
                      topic.
        :param proxy: The object that will handle all incoming messages.
        :param pool_name: String containing the name of the pool of workers
        """
        raise NotImplementedError()
    def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
        """Register as a member of a group of consumers for a given topic from
        the specified exchange.
        Exactly one member of a given pool will receive each message.
        A message will be delivered to multiple pools, if more than
        one is created.
        :param callback: Callable to be invoked for each message.
        :type callback: callable accepting one argument
        :param pool_name: The name of the consumer pool.
        :type pool_name: str
        :param topic: The routing topic for desired messages.
        :type topic: str
        :param exchange_name: The name of the message exchange where
                              the client should attach. Defaults to
                              the configured exchange.
        :type exchange_name: str
        """
        raise NotImplementedError()
    def consume_in_thread(self):
        """Spawn a thread to handle incoming messages.
        Spawn a thread that will be responsible for handling all incoming
        messages for consumers that were set up on this connection.
        Message dispatching inside of this is expected to be implemented in a
        non-blocking manner.  An example implementation would be having this
        thread pull messages in for all of the consumers, but utilize a thread
        pool for dispatching the messages to the proxy objects.
        """
        raise NotImplementedError()
def _safe_log(log_func, msg, msg_data):
    """Sanitizes the msg_data field before logging.

    Replaces known password-carrying fields and auth tokens with
    '<SANITIZED>' (on a deep copy, so the caller's dict is untouched),
    then calls log_func(msg, msg_data).
    """
    # Map of method name -> key paths within the message args that may
    # contain credentials.
    SANITIZE = {'set_admin_password': [('args', 'new_pass')],
                'run_instance': [('args', 'admin_password')],
                'route_message': [('args', 'message', 'args', 'method_info',
                                   'method_kwargs', 'password'),
                                  ('args', 'message', 'args', 'method_info',
                                   'method_kwargs', 'admin_password')]}
    has_method = 'method' in msg_data and msg_data['method'] in SANITIZE
    has_context_token = '_context_auth_token' in msg_data
    has_token = 'auth_token' in msg_data
    # Fast path: nothing sensitive present, log as-is without copying.
    if not any([has_method, has_context_token, has_token]):
        return log_func(msg, msg_data)
    msg_data = copy.deepcopy(msg_data)
    if has_method:
        for arg in SANITIZE.get(msg_data['method'], []):
            try:
                # Walk the key path down to the parent dict, then replace
                # the final key's value.
                d = msg_data
                for elem in arg[:-1]:
                    d = d[elem]
                d[arg[-1]] = '<SANITIZED>'
            except KeyError, e:
                LOG.info(_('Failed to sanitize %(item)s. Key error %(err)s'),
                         {'item': arg,
                          'err': e})
    if has_context_token:
        msg_data['_context_auth_token'] = '<SANITIZED>'
    if has_token:
        msg_data['auth_token'] = '<SANITIZED>'
    return log_func(msg, msg_data)
def serialize_remote_exception(failure_info, log_failure=True):
    """Prepares exception data to be sent over rpc.
    Failure_info should be a sys.exc_info() tuple.

    Returns a JSON string describing the exception (class, module,
    message, traceback, args and kwargs) that
    deserialize_remote_exception() can reconstruct on the caller side.
    """
    tb = traceback.format_exception(*failure_info)
    failure = failure_info[1]
    if log_failure:
        LOG.error(_("Returning exception %s to caller"), unicode(failure))
        LOG.error(tb)
    kwargs = {}
    # RPCException subclasses stash their constructor kwargs; forward them
    # so the exception can be re-raised with the same arguments remotely.
    if hasattr(failure, 'kwargs'):
        kwargs = failure.kwargs
    data = {
        'class': str(failure.__class__.__name__),
        'module': str(failure.__class__.__module__),
        'message': unicode(failure),
        'tb': tb,
        'args': failure.args,
        'kwargs': kwargs
    }
    json_data = jsonutils.dumps(data)
    return json_data
def deserialize_remote_exception(conf, data):
    """Reconstruct an exception from serialize_remote_exception() output.

    Returns an instance of the original exception type when its module is
    in conf.allowed_rpc_exception_modules and it can be rebuilt; otherwise
    returns a RemoteError wrapping the failure description.
    """
    failure = jsonutils.loads(str(data))
    trace = failure.get('tb', [])
    # Embed the remote traceback in the message shown locally.
    message = failure.get('message', "") + "\n" + "\n".join(trace)
    name = failure.get('class')
    module = failure.get('module')
    # NOTE(ameade): We DO NOT want to allow just any module to be imported, in
    # order to prevent arbitrary code execution.
    if module not in conf.allowed_rpc_exception_modules:
        return RemoteError(name, failure.get('message'), trace)
    try:
        mod = importutils.import_module(module)
        klass = getattr(mod, name)
        if not issubclass(klass, Exception):
            raise TypeError("Can only deserialize Exceptions")
        failure = klass(**failure.get('kwargs', {}))
    except (AttributeError, TypeError, ImportError):
        # Fall back to a generic wrapper when the type can't be rebuilt.
        return RemoteError(name, failure.get('message'), trace)
    ex_type = type(failure)
    str_override = lambda self: message
    # Subclass named "<Type>_Remote" whose string form carries the remote
    # traceback, since the original message can't always be changed.
    new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,),
                       {'__str__': str_override, '__unicode__': str_override})
    try:
        # NOTE(ameade): Dynamically create a new exception type and swap it in
        # as the new type for the exception. This only works on user defined
        # Exceptions and not core python exceptions. This is important because
        # we cannot necessarily change an exception message so we must override
        # the __str__ method.
        failure.__class__ = new_ex_type
    except TypeError:
        # NOTE(ameade): If a core exception then just add the traceback to the
        # first exception argument.
        failure.args = (message,) + failure.args[1:]
    return failure
class CommonRpcContext(object):
    """Minimal request-context object passed across rpc.

    The context is backed by a plain dict (``self.values``); attribute
    access falls through to that dict.
    """

    def __init__(self, **kwargs):
        self.values = kwargs

    def __getattr__(self, key):
        # Only called when normal attribute lookup fails; fall back to
        # the values dict, surfacing misses as AttributeError.
        if key in self.values:
            return self.values[key]
        raise AttributeError(key)

    def to_dict(self):
        """Return a deep copy of the underlying values dict."""
        return copy.deepcopy(self.values)

    @classmethod
    def from_dict(cls, values):
        """Alternate constructor: build a context from a plain dict."""
        return cls(**values)

    def deepcopy(self):
        """Return an independent copy of this context."""
        return self.from_dict(self.to_dict())

    def update_store(self):
        # Publish this context in the local (per-thread/greenthread) store.
        local.store.context = self

    def elevated(self, read_deleted=None, overwrite=False):
        """Return a version of this context with admin flag set."""
        # TODO(russellb) This method is a bit of a nova-ism. It makes
        # some assumptions about the data in the request context sent
        # across rpc, while the rest of this class does not. We could get
        # rid of this if we changed the nova code that uses this to
        # convert the RpcContext back to its native RequestContext doing
        # something like nova.context.RequestContext.from_dict(ctxt.to_dict())
        ctx = self.deepcopy()
        ctx.values['is_admin'] = True
        roles = ctx.values.setdefault('roles', [])
        if 'admin' not in roles:
            roles.append('admin')
        if read_deleted is not None:
            ctx.values['read_deleted'] = read_deleted
        return ctx
class ClientException(Exception):
    """Wrapper marking an exception as expected by an RPC proxy.

    Instantiating it captures the currently-active ``sys.exc_info()`` so
    the original exception can be handed back to the RPC client without
    the usual error logging.
    """

    def __init__(self):
        # Must be constructed inside an ``except`` block so exc_info()
        # refers to the exception being wrapped.
        self._exc_info = sys.exc_info()
def catch_client_exception(exceptions, func, *args, **kwargs):
    """Invoke func, wrapping any listed exception type in ClientException.

    :param exceptions: collection of exception classes that are expected
        and should not be logged as real errors by the RPC layer.
    :param func: callable invoked with ``*args``/``**kwargs``.
    :returns: func's return value on success.
    :raises ClientException: when func raises an exception whose exact
        type is listed in ``exceptions``.
    """
    try:
        return func(*args, **kwargs)
    # NOTE: 'except Exception as e' replaces the Python-2-only
    # 'except Exception, e' comma form (valid on 2.6+ and 3.x).
    except Exception as e:
        # Exact type match (not isinstance), preserving the original
        # behavior: subclasses of an expected exception still propagate.
        if type(e) in exceptions:
            raise ClientException()
        raise
def client_exceptions(*exceptions):
    """Decorator for manager methods that raise expected exceptions.

    Marking a Manager method with this decorator allows the declaration
    of expected exceptions that the RPC layer should not consider fatal,
    and not log as if they were generated in a real error scenario. Note
    that this will cause listed exceptions to be wrapped in a
    ClientException, which is used internally by the RPC layer.
    """
    import functools

    def outer(func):
        # functools.wraps preserves the wrapped method's __name__ and
        # __doc__ so introspection/debugging still see the original.
        @functools.wraps(func)
        def inner(*args, **kwargs):
            return catch_client_exception(exceptions, func, *args, **kwargs)
        return inner
    return outer
def version_is_compatible(imp_version, version):
    """Determine whether versions are compatible.

    :param imp_version: The version implemented
    :param version: The version requested by an incoming message.
    :returns: True when major versions match and the requested minor
        version does not exceed the implemented one.
    """
    requested = version.split('.')
    implemented = imp_version.split('.')
    # Major versions must match exactly.
    if int(requested[0]) != int(implemented[0]):
        return False
    # The requested minor version may not exceed what is implemented.
    return int(requested[1]) <= int(implemented[1])
def serialize_msg(raw_msg, force_envelope=False):
    """Wrap raw_msg in the rpc message envelope when envelopes are enabled.

    :param raw_msg: the message payload to send.
    :param force_envelope: wrap even when envelopes are globally disabled.
    :returns: the envelope dict, or raw_msg unchanged when enveloping is
        off.
    """
    if _SEND_RPC_ENVELOPE or force_envelope:
        # NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for
        # more information about this format.
        return {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
                _MESSAGE_KEY: jsonutils.dumps(raw_msg)}
    return raw_msg
def deserialize_msg(msg):
    """Unpack a message envelope, tolerating pre-envelope formats.

    Robustness Principle: "Be strict in what you send, liberal in what
    you accept."  We have to guess at what we just received:

    1) A dict.  Either (a) a standard message envelope, which we unpack,
       or (b) a notification / pre-envelope (version 1.0) message, which
       is returned as-is.
    2) Any non-dict type: returned as-is.  Covers rpc.call() return
       values from before envelopes existed (method-call messages were
       always dicts).

    :raises UnsupportedRpcEnvelopeVersion: when the envelope version is
        newer than what this side implements.
    """
    if not isinstance(msg, dict):
        # Case 2: pass through untouched and hope for the best.
        return msg

    if not all(key in msg for key in (_VERSION_KEY, _MESSAGE_KEY)):
        # Case 1.b: notification or version 1.0 message.
        return msg

    # Case 1.a: looks like the envelope format we expect.
    if not version_is_compatible(_RPC_ENVELOPE_VERSION, msg[_VERSION_KEY]):
        raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY])

    return jsonutils.loads(msg[_MESSAGE_KEY])
| |
# -*- coding: utf-8 -*-
import unittest
from datetime import date
import trytond.tests.test_tryton
from trytond.tests.test_tryton import POOL, DB_NAME, USER, CONTEXT
from trytond.transaction import Transaction
from decimal import Decimal
from test_base import TestBase
from trytond.exceptions import UserError
from trytond.config import config
config.set('email', 'from', 'test@ol.in')
class TestGiftCard(TestBase):
'''
Test Gift Card
'''
def test0010_create_gift_card(self):
"""
Create gift card
"""
GiftCard = POOL.get('gift_card.gift_card')
Currency = POOL.get('currency.currency')
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.usd = Currency(
name='US Dollar', symbol=u'$', code='USD',
)
self.usd.save()
gift_card, = GiftCard.create([{
'currency': self.usd.id,
'amount': Decimal('20'),
}])
self.assertEqual(gift_card.state, 'draft')
def test0015_on_change_currency(self):
"""
Check if currency digits are changed because of currency of gift
card
"""
GiftCard = POOL.get('gift_card.gift_card')
Currency = POOL.get('currency.currency')
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.usd = Currency(
name='US Dollar', symbol=u'$', code='USD', digits=3
)
self.usd.save()
gift_card = GiftCard(currency=self.usd.id)
self.assertEqual(gift_card.on_change_with_currency_digits(), 3)
gift_card = GiftCard(currency=None)
self.assertEqual(gift_card.on_change_with_currency_digits(), 2)
    def test0020_gift_card_on_processing_sale(self):
        """
        Check if gift card is being created on processing sale

        Builds a sale with a normal line, a gift card line and a comment
        line, pays it, processes it, and verifies that exactly one gift
        card and one invoice are created with the expected amounts.
        """
        Sale = POOL.get('sale.sale')
        GiftCard = POOL.get('gift_card.gift_card')
        Invoice = POOL.get('account.invoice')
        InvoiceLine = POOL.get('account.invoice.line')
        Configuration = POOL.get('gift_card.configuration')
        SaleLine = POOL.get('sale.line')
        with Transaction().start(DB_NAME, USER, context=CONTEXT):
            self.setup_defaults()
            gift_card_product = self.create_product(is_gift_card=True)
            with Transaction().set_context({'company': self.company.id}):
                Configuration.create([{
                    'liability_account': self._get_account_by_kind('revenue').id
                }])
                gc_price, _, = gift_card_product.gift_card_prices
                sale, = Sale.create([{
                    'reference': 'Sale1',
                    'sale_date': date.today(),
                    'invoice_address': self.party1.addresses[0].id,
                    'shipment_address': self.party1.addresses[0].id,
                    'party': self.party1.id,
                    'lines': [
                        ('create', [{
                            'type': 'line',
                            'quantity': 2,
                            'unit': self.uom,
                            'unit_price': 200,
                            'description': 'Test description1',
                            'product': self.product.id,
                        }, {
                            'quantity': 1,
                            'unit': self.uom,
                            'unit_price': 500,
                            'description': 'Gift Card',
                            'product': gift_card_product,
                            'gc_price': gc_price,
                        }, {
                            'type': 'comment',
                            'description': 'Test line',
                        }])
                    ]
                }])
                sale_line1, = SaleLine.search([
                    ('sale', '=', sale.id),
                    ('product', '=', gift_card_product.id),
                ])
                sale_line2, = SaleLine.search([
                    ('sale', '=', sale.id),
                    ('product', '=', self.product.id),
                ])
                sale_line3, = SaleLine.search([
                    ('sale', '=', sale.id),
                    ('product', '=', None),
                ])
                # Only the gift card product line is flagged as a gift card.
                self.assertTrue(sale_line1.is_gift_card)
                self.assertFalse(sale_line2.is_gift_card)
                self.assertFalse(sale_line3.is_gift_card)
                self.assertEqual(sale_line1.gift_card_delivery_mode, 'physical')
                # Gift card line amount is included in untaxed amount
                self.assertEqual(sale.untaxed_amount, 900)
                # Gift card line amount is included in total amount
                self.assertEqual(sale.total_amount, 900)
                Sale.quote([sale])
                self.assertEqual(sale.untaxed_amount, 900)
                self.assertEqual(sale.total_amount, 900)
                Sale.confirm([sale])
                self.assertEqual(sale.untaxed_amount, 900)
                self.assertEqual(sale.total_amount, 900)
                # Neither gift card nor invoice exists before processing.
                self.assertFalse(
                    GiftCard.search([('sale_line', '=', sale_line1.id)])
                )
                self.assertFalse(Invoice.search([]))
                self.SalePayment.create([{
                    'sale': sale.id,
                    'amount': Decimal('1000'),
                    'gateway': self.create_payment_gateway('manual'),
                    'credit_account': self.party1.account_receivable.id,
                }])
                Sale.process([sale])
                self.assertEqual(sale.untaxed_amount, 900)
                self.assertEqual(sale.total_amount, 900)
                # Processing creates exactly one gift card and one invoice.
                self.assertTrue(
                    GiftCard.search([('sale_line', '=', sale_line1.id)])
                )
                self.assertEqual(
                    GiftCard.search(
                        [('sale_line', '=', sale_line1.id)], count=True
                    ), 1
                )
                self.assertEqual(Invoice.search([], count=True), 1)
                gift_card, = GiftCard.search([
                    ('sale_line', '=', sale_line1.id)
                ])
                invoice, = Invoice.search([])
                line, = InvoiceLine.search([
                    ('invoice', '=', invoice.id),
                    ('description', '=', 'Gift Card'),
                ])
                # The gift card invoice line is booked against the
                # configured liability account.
                self.assertEqual(
                    line.account,
                    Configuration(1).liability_account
                )
                self.assertEqual(gift_card.amount, 500)
                self.assertEqual(gift_card.state, 'active')
                self.assertEqual(gift_card.sale, sale)
                self.assertEqual(invoice.untaxed_amount, 900)
                self.assertEqual(invoice.total_amount, 900)
    def test0021_phy_gift_card_on_processing_sale(self):
        """
        Check if physical gift card is being created on processing sale when
        invoice method is shipping

        Same flow as test0020, but with invoice_method 'shipment': the
        gift card is still created on processing, while no invoice is
        created yet (invoicing waits for the shipment).
        """
        Sale = POOL.get('sale.sale')
        GiftCard = POOL.get('gift_card.gift_card')
        Invoice = POOL.get('account.invoice')
        Configuration = POOL.get('gift_card.configuration')
        SaleLine = POOL.get('sale.line')
        with Transaction().start(DB_NAME, USER, context=CONTEXT):
            self.setup_defaults()
            gift_card_product = self.create_product(is_gift_card=True)
            with Transaction().set_context({'company': self.company.id}):
                Configuration.create([{
                    'liability_account': self._get_account_by_kind('revenue').id
                }])
                gc_price, _, = gift_card_product.gift_card_prices
                sale, = Sale.create([{
                    'reference': 'Sale1',
                    'sale_date': date.today(),
                    'invoice_method': 'shipment',
                    'invoice_address': self.party1.addresses[0].id,
                    'shipment_address': self.party1.addresses[0].id,
                    'party': self.party1.id,
                    'lines': [
                        ('create', [{
                            'type': 'line',
                            'quantity': 2,
                            'unit': self.uom,
                            'unit_price': 200,
                            'description': 'Test description1',
                            'product': self.product.id,
                        }, {
                            'quantity': 1,
                            'unit': self.uom,
                            'unit_price': 500,
                            'description': 'Gift Card',
                            'product': gift_card_product,
                            'gc_price': gc_price,
                        }, {
                            'type': 'comment',
                            'description': 'Test line',
                        }])
                    ]
                }])
                sale_line1, = SaleLine.search([
                    ('sale', '=', sale.id),
                    ('product', '=', gift_card_product.id),
                ])
                sale_line2, = SaleLine.search([
                    ('sale', '=', sale.id),
                    ('product', '=', self.product.id),
                ])
                sale_line3, = SaleLine.search([
                    ('sale', '=', sale.id),
                    ('product', '=', None),
                ])
                # Only the gift card product line is flagged as a gift card.
                self.assertTrue(sale_line1.is_gift_card)
                self.assertFalse(sale_line2.is_gift_card)
                self.assertFalse(sale_line3.is_gift_card)
                self.assertEqual(sale_line1.gift_card_delivery_mode, 'physical')
                # Gift card line amount is included in untaxed amount
                self.assertEqual(sale.untaxed_amount, 900)
                # Gift card line amount is included in total amount
                self.assertEqual(sale.total_amount, 900)
                Sale.quote([sale])
                self.assertEqual(sale.untaxed_amount, 900)
                self.assertEqual(sale.total_amount, 900)
                Sale.confirm([sale])
                self.assertEqual(sale.untaxed_amount, 900)
                self.assertEqual(sale.total_amount, 900)
                # Neither gift card nor invoice exists before processing.
                self.assertFalse(
                    GiftCard.search([('sale_line', '=', sale_line1.id)])
                )
                self.assertFalse(Invoice.search([]))
                self.SalePayment.create([{
                    'sale': sale.id,
                    'amount': Decimal('1000'),
                    'gateway': self.create_payment_gateway('manual'),
                    'credit_account': self.party1.account_receivable.id,
                }])
                Sale.process([sale])
                self.assertEqual(sale.untaxed_amount, 900)
                self.assertEqual(sale.total_amount, 900)
                # Exactly one gift card is created on processing.
                self.assertTrue(
                    GiftCard.search([('sale_line', '=', sale_line1.id)])
                )
                self.assertEqual(
                    GiftCard.search(
                        [('sale_line', '=', sale_line1.id)], count=True
                    ), 1
                )
                # With invoice_method 'shipment' no invoice exists yet.
                self.assertEqual(Invoice.search([], count=True), 0)
                gift_card, = GiftCard.search([
                    ('sale_line', '=', sale_line1.id)
                ])
                self.assertEqual(gift_card.amount, 500)
                self.assertEqual(gift_card.state, 'active')
                self.assertEqual(gift_card.sale, sale)
    def test0022_virtual_gift_card_on_processing_sale(self):
        """
        Check if virtual gift card is being created on processing sale

        Uses a service product in 'virtual' delivery mode with a
        recipient email; the gift card and invoice are created on
        processing even though the sale's invoice method is 'shipment'
        (virtual goods need no shipment).
        """
        Sale = POOL.get('sale.sale')
        GiftCard = POOL.get('gift_card.gift_card')
        Invoice = POOL.get('account.invoice')
        InvoiceLine = POOL.get('account.invoice.line')
        Configuration = POOL.get('gift_card.configuration')
        SaleLine = POOL.get('sale.line')
        with Transaction().start(DB_NAME, USER, context=CONTEXT):
            self.setup_defaults()
            gift_card_product = self.create_product(
                type='service',
                mode='virtual',
                is_gift_card=True
            )
            with Transaction().set_context({'company': self.company.id}):
                Configuration.create([{
                    'liability_account': self._get_account_by_kind('revenue').id
                }])
                gc_price, _, = gift_card_product.gift_card_prices
                sale, = Sale.create([{
                    'reference': 'Sale1',
                    'sale_date': date.today(),
                    'invoice_method': 'shipment',
                    'invoice_address': self.party1.addresses[0].id,
                    'shipment_address': self.party1.addresses[0].id,
                    'party': self.party1.id,
                    'lines': [
                        ('create', [{
                            'quantity': 1,
                            'unit': self.uom,
                            'unit_price': 500,
                            'description': 'Gift Card',
                            'product': gift_card_product,
                            'gc_price': gc_price,
                            'recipient_email': 'test@example.com',
                        }, {
                            'type': 'comment',
                            'description': 'Test line',
                        }])
                    ]
                }])
                sale_line1, = SaleLine.search([
                    ('sale', '=', sale.id),
                    ('product', '=', gift_card_product.id),
                ])
                sale_line3, = SaleLine.search([
                    ('sale', '=', sale.id),
                    ('product', '=', None),
                ])
                self.assertTrue(sale_line1.is_gift_card)
                self.assertFalse(sale_line3.is_gift_card)
                self.assertEqual(sale_line1.gift_card_delivery_mode, 'virtual')
                # Gift card line amount is included in untaxed amount
                self.assertEqual(sale.untaxed_amount, 500)
                # Gift card line amount is included in total amount
                self.assertEqual(sale.total_amount, 500)
                Sale.quote([sale])
                self.assertEqual(sale.untaxed_amount, 500)
                self.assertEqual(sale.total_amount, 500)
                Sale.confirm([sale])
                self.assertEqual(sale.untaxed_amount, 500)
                self.assertEqual(sale.total_amount, 500)
                # Neither gift card nor invoice exists before processing.
                self.assertFalse(
                    GiftCard.search([('sale_line', '=', sale_line1.id)])
                )
                self.assertFalse(Invoice.search([]))
                self.SalePayment.create([{
                    'sale': sale.id,
                    'amount': Decimal('1000'),
                    'gateway': self.create_payment_gateway('manual'),
                    'credit_account': self.party1.account_receivable.id,
                }])
                Sale.process([sale])
                self.assertEqual(sale.untaxed_amount, 500)
                self.assertEqual(sale.total_amount, 500)
                # Processing creates exactly one gift card and one invoice.
                self.assertTrue(
                    GiftCard.search([('sale_line', '=', sale_line1.id)])
                )
                self.assertEqual(
                    GiftCard.search(
                        [('sale_line', '=', sale_line1.id)], count=True
                    ), 1
                )
                self.assertEqual(Invoice.search([], count=True), 1)
                gift_card, = GiftCard.search([
                    ('sale_line', '=', sale_line1.id)
                ])
                invoice, = Invoice.search([])
                line, = InvoiceLine.search([
                    ('invoice', '=', invoice.id),
                    ('description', '=', 'Gift Card'),
                ])
                # The gift card invoice line is booked against the
                # configured liability account.
                self.assertEqual(
                    line.account,
                    Configuration(1).liability_account
                )
                self.assertEqual(gift_card.amount, 500)
                self.assertEqual(gift_card.state, 'active')
                self.assertEqual(gift_card.sale, sale)
                self.assertEqual(invoice.untaxed_amount, 500)
                self.assertEqual(invoice.total_amount, 500)
    def test0025_create_gift_card_for_line(self):
        """
        Check that no gift card is created for a plain 'line' sale line
        whose product is not a gift card, and that copying such a line
        does not copy gift cards either.
        """
        Sale = POOL.get('sale.sale')
        SaleLine = POOL.get('sale.line')
        GiftCard = POOL.get('gift_card.gift_card')
        Configuration = POOL.get('gift_card.configuration')
        with Transaction().start(DB_NAME, USER, context=CONTEXT):
            self.setup_defaults()
            with Transaction().set_context({'company': self.company.id}):
                Configuration.create([{
                    'liability_account': self._get_account_by_kind('revenue').id
                }])
                sale, = Sale.create([{
                    'reference': 'Sale1',
                    'sale_date': date.today(),
                    'invoice_address': self.party1.addresses[0].id,
                    'shipment_address': self.party1.addresses[0].id,
                    'party': self.party1.id,
                }])
                sale_line, = SaleLine.create([{
                    'sale': sale.id,
                    'type': 'line',
                    'quantity': 2,
                    'unit': self.uom,
                    'unit_price': 200,
                    'description': 'Test description1',
                    'product': self.product.id,
                }])
                self.assertFalse(
                    GiftCard.search([('sale_line', '=', sale_line.id)])
                )
                # Explicitly asking the line to create gift cards is a no-op
                # for a non-gift-card product.
                sale_line.create_gift_cards()
                # No gift card is created
                self.assertFalse(
                    GiftCard.search([('sale_line', '=', sale_line.id)])
                )
                # Copying a line must not carry over gift cards.
                sale_line3, = SaleLine.copy([sale_line])
                self.assertFalse(sale_line3.gift_cards)
    def test0025_gift_card_on_processing_sale_without_liability_account(self):
        """
        Check if gift card is being created on processing sale when liability
        account is missing from gift card configuration

        Expectation: processing fails with UserError and no gift card or
        invoice is created.
        """
        Sale = POOL.get('sale.sale')
        GiftCard = POOL.get('gift_card.gift_card')
        Invoice = POOL.get('account.invoice')
        with Transaction().start(DB_NAME, USER, context=CONTEXT):
            self.setup_defaults()
            gift_card_product = self.create_product(is_gift_card=True)
            gc_price, _, = gift_card_product.gift_card_prices
            # NOTE: no gift_card.configuration record is created here, so
            # the liability account is deliberately missing.
            with Transaction().set_context({'company': self.company.id}):
                sale, = Sale.create([{
                    'reference': 'Sale1',
                    'sale_date': date.today(),
                    'invoice_address': self.party1.addresses[0].id,
                    'shipment_address': self.party1.addresses[0].id,
                    'party': self.party1.id,
                    'lines': [
                        ('create', [{
                            'type': 'line',
                            'quantity': 2,
                            'unit': self.uom,
                            'unit_price': 200,
                            'description': 'Test description1',
                            'product': self.product.id,
                        }, {
                            'quantity': 1,
                            'unit': self.uom,
                            'unit_price': 500,
                            'description': 'Test description2',
                            'product': gift_card_product,
                            'gc_price': gc_price,
                        }])
                    ]
                }])
                # Gift card line amount is included in untaxed amount
                self.assertEqual(sale.untaxed_amount, 900)
                # Gift card line amount is included in total amount
                self.assertEqual(sale.total_amount, 900)
                Sale.quote([sale])
                Sale.confirm([sale])
                self.assertFalse(
                    GiftCard.search([('sale_line.sale', '=', sale.id)])
                )
                self.assertFalse(Invoice.search([]))
                # Without a liability account, processing must fail.
                with self.assertRaises(UserError):
                    Sale.process([sale])
    def test0030_check_on_change_amount(self):
        """
        Check the on_change values returned when toggling the
        is_gift_card flag on a sale line.
        """
        SaleLine = POOL.get('sale.line')
        with Transaction().start(DB_NAME, USER, context=CONTEXT):
            self.setup_defaults()
            with Transaction().set_context({'company': self.company.id}):
                # Sale line as gift card
                sale_line = SaleLine(
                    unit_price=Decimal('22.56789'),
                    type='line', sale=None
                )
                # Not a gift card yet: description changes but product
                # is left alone.
                on_change_vals = sale_line.on_change_is_gift_card()
                self.assertTrue('description' in on_change_vals)
                self.assertTrue('product' not in on_change_vals)
                # Once flagged as gift card the product is cleared and
                # description/unit are reset.
                sale_line.is_gift_card = True
                on_change_vals = sale_line.on_change_is_gift_card()
                self.assertEqual(on_change_vals['product'], None)
                self.assertTrue('description' in on_change_vals)
                self.assertTrue('unit' in on_change_vals)
def test0040_gift_card_transition(self):
"""
Check gift card transitions
"""
GiftCard = POOL.get('gift_card.gift_card')
Currency = POOL.get('currency.currency')
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.usd = Currency(
name='US Dollar', symbol=u'$', code='USD',
)
self.usd.save()
gift_card, = GiftCard.create([{
'currency': self.usd.id,
'amount': Decimal('20'),
}])
self.assertEqual(gift_card.state, 'draft')
# Gift card can become active in draft state
GiftCard.activate([gift_card])
self.assertEqual(gift_card.state, 'active')
# Gift card can be calcelled from active state
GiftCard.cancel([gift_card])
self.assertEqual(gift_card.state, 'canceled')
# Gift card can be set back to draft state once canceled
GiftCard.draft([gift_card])
self.assertEqual(gift_card.state, 'draft')
# Gift card can be canceled from draft state also
GiftCard.cancel([gift_card])
self.assertEqual(gift_card.state, 'canceled')
def test0050_gift_card_sequence(self):
"""
Check sequence is created on activating gift card
"""
GiftCard = POOL.get('gift_card.gift_card')
Currency = POOL.get('currency.currency')
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.usd = Currency(
name='US Dollar', symbol=u'$', code='USD',
)
self.usd.save()
gift_card, = GiftCard.create([{
'currency': self.usd.id,
'amount': Decimal('20'),
}])
self.assertTrue(gift_card.number)
number = gift_card.number
GiftCard.activate([gift_card])
self.assertEqual(gift_card.number, number)
gift_card2, = GiftCard.copy([gift_card])
self.assertNotEqual(gift_card2.number, number)
    def test0050_authorize_gift_card_payment_gateway_valid_card(self):
        """
        Test gift card authorization: within the available balance it
        succeeds and reduces the balance; beyond it, it raises UserError.
        """
        GiftCard = POOL.get('gift_card.gift_card')
        PaymentTransaction = POOL.get('payment_gateway.transaction')
        with Transaction().start(DB_NAME, USER, context=CONTEXT):
            self.setup_defaults()
            with Transaction().set_context({'company': self.company.id}):
                active_gift_card, = GiftCard.create([{
                    'amount': Decimal('150'),
                    'number': '45671338',
                    'state': 'active',
                }])
                gateway = self.create_payment_gateway()
                # Case 1:
                # Gift card available amount (150) > amount to be paid (50)
                payment_transaction = PaymentTransaction(
                    description="Pay invoice using gift card",
                    party=self.party1.id,
                    address=self.party1.addresses[0].id,
                    amount=Decimal('50'),
                    currency=self.company.currency.id,
                    gateway=gateway.id,
                    gift_card=active_gift_card,
                    credit_account=self.party1.account_receivable.id,
                )
                payment_transaction.save()
                PaymentTransaction.authorize([payment_transaction])
                self.assertEqual(payment_transaction.state, 'authorized')
                self.assertEqual(
                    active_gift_card.amount_authorized, Decimal('50')
                )
                # 150 - 50 authorized = 100 still available.
                self.assertEqual(
                    active_gift_card.amount_available, Decimal('100')
                )
                # Case 2: Gift card amount (100) < amount to be paid (300)
                payment_transaction = PaymentTransaction(
                    description="Pay invoice using gift card",
                    party=self.party1.id,
                    address=self.party1.addresses[0].id,
                    amount=Decimal('300'),
                    currency=self.company.currency.id,
                    gateway=gateway.id,
                    gift_card=active_gift_card,
                    credit_account=self.party1.account_receivable.id,
                )
                payment_transaction.save()
                # Authorizing beyond the available balance must fail.
                with self.assertRaises(UserError):
                    PaymentTransaction.authorize([payment_transaction])
    def test0055_capture_gift_card(self):
        """
        Test capturing of gift card: partial capture, capture that
        exhausts the card (state becomes 'used'), and over-capture
        (UserError).
        """
        GiftCard = POOL.get('gift_card.gift_card')
        PaymentTransaction = POOL.get('payment_gateway.transaction')
        with Transaction().start(DB_NAME, USER, context=CONTEXT):
            self.setup_defaults()
            with Transaction().set_context({'company': self.company.id}):
                active_gift_card, = GiftCard.create([{
                    'amount': Decimal('200'),
                    'number': '45671338',
                    'state': 'active',
                }])
                gateway = self.create_payment_gateway()
                # Freshly created card: nothing captured or authorized yet.
                self.assertEqual(
                    active_gift_card.amount_captured, Decimal('0')
                )
                self.assertEqual(
                    active_gift_card.amount_authorized, Decimal('0')
                )
                self.assertEqual(
                    active_gift_card.amount_available, Decimal('200')
                )
                # Capture
                # Case 1
                # Gift card available amount (200) > amount to be paid (100)
                payment_transaction = PaymentTransaction(
                    description="Pay using gift card",
                    party=self.party1.id,
                    address=self.party1.addresses[0].id,
                    amount=Decimal('100'),
                    currency=self.company.currency.id,
                    gateway=gateway.id,
                    gift_card=active_gift_card,
                    credit_account=self.party1.account_receivable.id,
                )
                payment_transaction.save()
                PaymentTransaction.capture([payment_transaction])
                self.assertEqual(payment_transaction.state, 'posted')
                self.assertEqual(
                    active_gift_card.amount_captured, Decimal('100')
                )
                self.assertEqual(
                    active_gift_card.amount_authorized, Decimal('0')
                )
                # 200 - 100 = 100
                self.assertEqual(
                    active_gift_card.amount_available, Decimal('100')
                )
                # Case 2
                # Gift card available amount (100) = amount to be paid (100)
                payment_transaction = PaymentTransaction(
                    description="Pay invoice using gift card",
                    party=self.party1.id,
                    address=self.party1.addresses[0].id,
                    amount=Decimal('100'),
                    currency=self.company.currency.id,
                    gateway=gateway.id,
                    gift_card=active_gift_card,
                    credit_account=self.party1.account_receivable.id,
                )
                payment_transaction.save()
                PaymentTransaction.capture([payment_transaction])
                self.assertEqual(payment_transaction.state, 'posted')
                self.assertEqual(
                    active_gift_card.amount_captured, Decimal('200')
                )
                self.assertEqual(
                    active_gift_card.amount_authorized, Decimal('0')
                )
                # 200 - 200 = 0
                self.assertEqual(
                    active_gift_card.amount_available, Decimal('0')
                )
                # A fully captured card flips to 'used'.
                self.assertEqual(active_gift_card.state, 'used')
                active_gift_card, = GiftCard.create([{
                    'amount': Decimal('10'),
                    'number': '45671339',
                    'state': 'active',
                }])
                # Case 3: Gift card amount (10) < amount to be paid (100)
                payment_transaction = PaymentTransaction(
                    description="Pay invoice using gift card",
                    party=self.party1.id,
                    address=self.party1.addresses[0].id,
                    amount=Decimal('100'),
                    currency=self.company.currency.id,
                    gateway=gateway.id,
                    gift_card=active_gift_card,
                    credit_account=self.party1.account_receivable.id,
                )
                payment_transaction.save()
                # Capturing more than the balance must fail.
                with self.assertRaises(UserError):
                    PaymentTransaction.capture([payment_transaction])
    def test0057_settle_gift_card(self):
        """
        Test settlement of gift card: settling an authorized transaction
        moves its amount from 'authorized' to 'captured'.
        """
        GiftCard = POOL.get('gift_card.gift_card')
        PaymentTransaction = POOL.get('payment_gateway.transaction')
        with Transaction().start(DB_NAME, USER, context=CONTEXT):
            self.setup_defaults()
            with Transaction().set_context({'company': self.company.id}):
                active_gift_card, = GiftCard.create([{
                    'amount': Decimal('200'),
                    'number': '45671338',
                    'state': 'active',
                }])
                gateway = self.create_payment_gateway()
                # Authorization of gift card
                # Case 1: Gift card available amount > amount to be paid
                payment_transaction = PaymentTransaction(
                    description="Pay using gift card",
                    party=self.party1.id,
                    address=self.party1.addresses[0].id,
                    amount=Decimal('100'),
                    currency=self.company.currency.id,
                    gateway=gateway.id,
                    gift_card=active_gift_card,
                    credit_account=self.party1.account_receivable.id,
                )
                payment_transaction.save()
                PaymentTransaction.authorize([payment_transaction])
                self.assertEqual(
                    active_gift_card.amount_captured, Decimal('0')
                )
                self.assertEqual(
                    active_gift_card.amount_authorized, Decimal('100')
                )
                # 200 - 100 = 100
                self.assertEqual(
                    active_gift_card.amount_available, Decimal('100')
                )
                # Settlement
                # Case 1: Gift card amount (100) > amount to be settled (50)
                payment_transaction = PaymentTransaction(
                    description="Pay using gift card",
                    party=self.party1.id,
                    address=self.party1.addresses[0].id,
                    amount=Decimal('50'),
                    currency=self.company.currency.id,
                    gateway=gateway.id,
                    gift_card=active_gift_card,
                    credit_account=self.party1.account_receivable.id,
                )
                payment_transaction.save()
                PaymentTransaction.authorize([payment_transaction])
                self.assertEqual(
                    active_gift_card.amount_captured, Decimal('0')
                )
                self.assertEqual(
                    active_gift_card.amount_authorized, Decimal('150')
                )
                # 100 - 50 = 50
                self.assertEqual(
                    active_gift_card.amount_available, Decimal('50')
                )
                # Settling moves the 50 from authorized to captured.
                PaymentTransaction.settle([payment_transaction])
                self.assertEqual(payment_transaction.state, 'posted')
                self.assertEqual(
                    active_gift_card.amount_captured, Decimal('50')
                )
                self.assertEqual(
                    active_gift_card.amount_authorized, Decimal('100')
                )
                # 200 - 100 - 50 = 50
                self.assertEqual(
                    active_gift_card.amount_available, Decimal('50')
                )
def test0060_payment_gateway_methods_and_providers(self):
"""
Tests gateway methods
"""
PaymentGateway = POOL.get('payment_gateway.gateway')
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
gateway = PaymentGateway(
provider='self',
)
self.assertTrue(gateway.get_methods())
self.assertTrue(('gift_card', 'Gift Card') in gateway.get_methods())
gateway = PaymentGateway(
provider='authorize.net',
)
self.assertFalse(gateway.get_methods())
    def test0070_gift_card_amount(self):
        """
        Check authorized, captured and available amount for gift card
        after a mix of authorized and captured transactions.
        """
        GiftCard = POOL.get('gift_card.gift_card')
        PaymentTransaction = POOL.get('payment_gateway.transaction')
        with Transaction().start(DB_NAME, USER, context=CONTEXT):
            self.setup_defaults()
            with Transaction().set_context({'company': self.company.id}):
                active_gift_card, = GiftCard.create([{
                    'amount': Decimal('200'),
                    'number': '45671338',
                    'state': 'active',
                }])
                gateway = self.create_payment_gateway()
                # Payment transactions in authorized state
                payment_transaction1 = PaymentTransaction(
                    description="Payment Transaction 1",
                    party=self.party1.id,
                    address=self.party1.addresses[0].id,
                    amount=Decimal('70'),
                    currency=self.company.currency.id,
                    gateway=gateway.id,
                    gift_card=active_gift_card,
                    credit_account=self.party1.account_receivable.id,
                )
                payment_transaction1.save()
                PaymentTransaction.authorize([payment_transaction1])
                payment_transaction2 = PaymentTransaction(
                    description="Payment Transaction 2",
                    party=self.party1.id,
                    address=self.party1.addresses[0].id,
                    amount=Decimal('20'),
                    currency=self.company.currency.id,
                    gateway=gateway.id,
                    gift_card=active_gift_card,
                    credit_account=self.party1.account_receivable.id,
                )
                payment_transaction2.save()
                PaymentTransaction.authorize([payment_transaction2])
                # Payment transactions being captured
                payment_transaction3 = PaymentTransaction(
                    description="Payment Transaction 3",
                    party=self.party1.id,
                    address=self.party1.addresses[0].id,
                    amount=Decimal('10'),
                    currency=self.company.currency.id,
                    gateway=gateway.id,
                    gift_card=active_gift_card,
                    credit_account=self.party1.account_receivable.id,
                )
                payment_transaction3.save()
                PaymentTransaction.capture([payment_transaction3])
                payment_transaction4 = PaymentTransaction(
                    description="Payment Transaction 4",
                    party=self.party1.id,
                    address=self.party1.addresses[0].id,
                    amount=Decimal('20'),
                    currency=self.company.currency.id,
                    gateway=gateway.id,
                    gift_card=active_gift_card,
                    credit_account=self.party1.account_receivable.id,
                )
                payment_transaction4.save()
                PaymentTransaction.capture([payment_transaction4])
                # authorized: 70 + 20; captured: 10 + 20;
                # available: 200 - 90 - 30 = 80
                self.assertEqual(active_gift_card.amount_authorized, 90)
                self.assertEqual(active_gift_card.amount_captured, 30)
                self.assertEqual(active_gift_card.amount_available, 80)
def test0080_test_gift_card_report(self):
"""
Test Gift Card report
"""
GiftCard = POOL.get('gift_card.gift_card')
GiftCardReport = POOL.get('gift_card.gift_card', type='report')
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
with Transaction().set_context({'company': self.company.id}):
gift_card, = GiftCard.create([{
'amount': Decimal('200'),
'number': '45671338',
'state': 'active',
}])
val = GiftCardReport.execute([gift_card.id], {})
self.assert_(val)
# Assert report name
self.assertEqual(val[3], 'Gift Card')
    def test0090_test_gift_card_deletion(self):
        """
        Test that Gift Card should not be deleted in active state,
        while a draft card can be deleted freely.
        """
        GiftCard = POOL.get('gift_card.gift_card')
        with Transaction().start(DB_NAME, USER, context=CONTEXT):
            self.setup_defaults()
            with Transaction().set_context({'company': self.company.id}):
                gift_card, = GiftCard.create([{
                    'amount': Decimal('200'),
                    'number': '45671338',
                    'state': 'active',
                }])
                # Deleting an active card must raise.
                with self.assertRaises(Exception):
                    GiftCard.delete([gift_card])
                # Try to delete gift card in some other state and it will
                # be deleted
                gift_card, = GiftCard.create([{
                    'amount': Decimal('200'),
                    'number': '45671339',
                    'state': 'draft',
                }])
                GiftCard.delete([gift_card])
    def test0100_send_virtual_gift_cards(self):
        """
        Check if virtual gift cards are sent through email

        A sale containing a virtual gift-card line is processed after
        payment; exactly one gift card must be created, carrying the
        recipient data from the sale line, and one email must be queued.
        """
        Sale = POOL.get('sale.sale')
        GiftCard = POOL.get('gift_card.gift_card')
        Invoice = POOL.get('account.invoice')
        Configuration = POOL.get('gift_card.configuration')
        SaleLine = POOL.get('sale.line')
        EmailQueue = POOL.get('email.queue')
        with Transaction().start(DB_NAME, USER, context=CONTEXT):
            self.setup_defaults()
            gift_card_product = self.create_product(
                type='service', mode='virtual', is_gift_card=True
            )
            with Transaction().set_context({'company': self.company.id}):
                Configuration.create([{
                    'liability_account': self._get_account_by_kind('revenue').id
                }])
                gc_price, _, = gift_card_product.gift_card_prices
                sale, = Sale.create([{
                    'reference': 'Sale1',
                    'sale_date': date.today(),
                    'invoice_address': self.party1.addresses[0].id,
                    'shipment_address': self.party1.addresses[0].id,
                    'party': self.party1.id,
                    'lines': [
                        ('create', [{
                            'type': 'line',
                            'quantity': 2,
                            'unit': self.uom,
                            'unit_price': 200,
                            'description': 'Test description1',
                            'product': self.product.id,
                        }, {
                            'quantity': 1,
                            'unit': self.uom,
                            'unit_price': 500,
                            'description': 'Gift Card',
                            'product': gift_card_product,
                            'recipient_email': 'test@gift_card.com',
                            'recipient_name': 'John Doe',
                            'gc_price': gc_price.id
                        }, {
                            'type': 'comment',
                            'description': 'Test line',
                        }])
                    ]
                }])
                gift_card_line, = SaleLine.search([
                    ('sale', '=', sale.id),
                    ('product', '=', gift_card_product.id),
                ])
                # Service-type gift card product implies virtual delivery.
                self.assertEqual(
                    gift_card_line.gift_card_delivery_mode, 'virtual'
                )
                Sale.quote([sale])
                Sale.confirm([sale])
                # No gift card yet
                self.assertFalse(
                    GiftCard.search([('sale_line', '=', gift_card_line.id)])
                )
                # No Email is being sent yet
                self.assertFalse(
                    EmailQueue.search([
                        ('to_addrs', '=', gift_card_line.recipient_email),
                    ])
                )
                self.SalePayment.create([{
                    'sale': sale.id,
                    'amount': Decimal('1000'),
                    'gateway': self.create_payment_gateway('manual'),
                    'credit_account': self.party1.account_receivable.id,
                }])
                Sale.process([sale])
                # Gift card is created
                self.assertTrue(
                    GiftCard.search([('sale_line', '=', gift_card_line.id)])
                )
                self.assertEqual(
                    GiftCard.search(
                        [('sale_line', '=', gift_card_line.id)], count=True
                    ), 1
                )
                self.assertEqual(Invoice.search([], count=True), 1)
                gift_card, = GiftCard.search([
                    ('sale_line', '=', gift_card_line.id)
                ])
                # Recipient data propagates from the sale line to the card.
                self.assertEqual(
                    gift_card.recipient_email, gift_card_line.recipient_email
                )
                self.assertEqual(
                    gift_card.recipient_name, gift_card_line.recipient_name
                )
                # Email is being sent
                self.assertTrue(
                    EmailQueue.search([
                        ('to_addrs', '=', gift_card_line.recipient_email),
                    ])
                )
    def test0110_test_sending_email_multiple_times(self):
        """
        Test that email should not be sent multiple times for gift card

        Activating a card queues exactly one email; re-activating must not
        queue a second one (guarded by ``is_email_sent``).
        """
        GiftCard = POOL.get('gift_card.gift_card')
        EmailQueue = POOL.get('email.queue')
        Sale = POOL.get('sale.sale')
        SaleLine = POOL.get('sale.line')
        with Transaction().start(DB_NAME, USER, context=CONTEXT):
            self.setup_defaults()
            with Transaction().set_context({'company': self.company.id}):
                gift_card_product = self.create_product(
                    type='service', mode='virtual', is_gift_card=True,
                )
                gc_price, _, = gift_card_product.gift_card_prices
                sale, = Sale.create([{
                    'reference': 'Sale1',
                    'sale_date': date.today(),
                    'invoice_address': self.party1.addresses[0].id,
                    'shipment_address': self.party1.addresses[0].id,
                    'party': self.party1.id,
                    'lines': [
                        ('create', [{
                            'type': 'line',
                            'quantity': 2,
                            'unit': self.uom,
                            'unit_price': 200,
                            'description': 'Test description1',
                            'product': self.product.id,
                        }, {
                            'quantity': 1,
                            'unit': self.uom,
                            'unit_price': 500,
                            'description': 'Gift Card',
                            'product': gift_card_product,
                            'recipient_email': 'test@gift_card.com',
                            'recipient_name': 'John Doe',
                            'gc_price': gc_price.id,
                        }, {
                            'type': 'comment',
                            'description': 'Test line',
                        }])
                    ]
                }])
                gift_card_line, = SaleLine.search([
                    ('sale', '=', sale.id),
                    ('product', '=', gift_card_product.id),
                ])
                # Create the card directly in draft state so activation
                # (and hence the email) can be triggered manually below.
                gift_card, = GiftCard.create([{
                    'sale_line': gift_card_line.id,
                    'amount': Decimal('200'),
                    'number': '45671338',
                    'recipient_email': 'test@gift_card.com',
                    'recipient_name': 'Jhon Doe',
                }])
                # No Email is being sent yet
                self.assertFalse(
                    EmailQueue.search([
                        ('to_addrs', '=', gift_card.recipient_email),
                    ])
                )
                self.assertFalse(gift_card.is_email_sent)
                # Send email by activating gift card
                GiftCard.activate([gift_card])
                # Email is being sent
                self.assertEqual(
                    EmailQueue.search([
                        ('to_addrs', '=', gift_card.recipient_email),
                    ], count=True), 1
                )
                self.assertTrue(gift_card.is_email_sent)
                # Try sending email again
                GiftCard.activate([gift_card])
                # Email is not sent
                self.assertEqual(
                    EmailQueue.search([
                        ('to_addrs', '=', gift_card.recipient_email),
                    ], count=True), 1
                )
    def test0112_validate_product_type_and_mode(self):
        """
        Check if gift card product is service product for virtual mode and
        goods otherwise

        Valid pairs: (service, virtual), (goods, physical),
        (goods, combined). All other combinations raise UserError.
        """
        with Transaction().start(DB_NAME, USER, context=CONTEXT):
            self.setup_defaults()
            with Transaction().set_context({'company': self.company.id}):
                # Create gift card product of service type in physical mode
                with self.assertRaises(UserError):
                    self.create_product(
                        type='service', mode='physical', is_gift_card=True,
                    )
                # Create gift card product of service type in combined mode
                with self.assertRaises(UserError):
                    self.create_product(
                        type='service', mode='combined', is_gift_card=True
                    )
                # Create gift card product of goods type in virtual mode
                with self.assertRaises(UserError):
                    self.create_product(
                        type='goods', mode='virtual', is_gift_card=True
                    )
                # In virtual mode product can be created with service type
                # only
                service_product = self.create_product(
                    type='service', mode='virtual', is_gift_card=True
                )
                self.assert_(service_product)
                # In physical mode product can be created with goods type
                # only
                goods_product = self.create_product(
                    type='goods', mode='physical', is_gift_card=True
                )
                self.assert_(goods_product)
                # In combined mode product can be created with goods type only
                goods_product = self.create_product(
                    type='goods', mode='combined', is_gift_card=True
                )
                self.assert_(goods_product)
    def test0115_test_gc_min_max(self):
        """
        Test gift card minimum and maximum amounts on product template

        With open amounts enabled, saving must reject gc_min > gc_max and
        negative bounds; a consistent positive range saves cleanly.
        """
        with Transaction().start(DB_NAME, USER, context=CONTEXT):
            self.setup_defaults()
            gift_card_product = self.create_product(
                type='service', mode='virtual', is_gift_card=True
            )
            # Validation only applies when open amounts are allowed.
            gift_card_product.allow_open_amount = True
            with Transaction().set_context({'company': self.company.id}):
                # gc_min > gc_max
                gift_card_product.gc_min = Decimal('70')
                gift_card_product.gc_max = Decimal('60')
                with self.assertRaises(UserError):
                    gift_card_product.save()
                # gc_min as negative
                gift_card_product.gc_min = Decimal('-10')
                gift_card_product.gc_max = Decimal('60')
                with self.assertRaises(UserError):
                    gift_card_product.save()
                # gc_max as negative
                gift_card_product.gc_min = Decimal('10')
                gift_card_product.gc_max = Decimal('-80')
                with self.assertRaises(UserError):
                    gift_card_product.save()
                # gc_min < gc_max
                gift_card_product.gc_min = Decimal('70')
                gift_card_product.gc_max = Decimal('100')
                gift_card_product.save()
    def test0118_validate_gc_amount_on_sale_line(self):
        """
        Tests if gift card line amount lies between gc_min and gc_max defined
        on the template

        Processing a sale must fail when the gift-card line's unit price is
        below gc_min or above gc_max, and succeed when it is inside the
        range, creating one card per unit of quantity.
        """
        Sale = POOL.get('sale.sale')
        SaleLine = POOL.get('sale.line')
        Configuration = POOL.get('gift_card.configuration')
        with Transaction().start(DB_NAME, USER, context=CONTEXT):
            self.setup_defaults()
            Configuration.create([{
                'liability_account': self._get_account_by_kind('revenue').id
            }])
            gift_card_product = self.create_product(
                type='goods', mode='physical', is_gift_card=True
            )
            gift_card_product.allow_open_amount = True
            gift_card_product.gc_min = Decimal('100')
            gift_card_product.gc_max = Decimal('500')
            gift_card_product.save()
            with Transaction().set_context({'company': self.company.id}):
                # gift card line amount < gc_min
                sale, = Sale.create([{
                    'reference': 'Sale1',
                    'sale_date': date.today(),
                    'invoice_address': self.party1.addresses[0].id,
                    'shipment_address': self.party1.addresses[0].id,
                    'party': self.party1.id,
                    'lines': [
                        ('create', [{
                            'type': 'line',
                            'quantity': 2,
                            'unit': self.uom,
                            'unit_price': 200,
                            'description': 'Test description1',
                            'product': self.product.id,
                        }, {
                            'quantity': 10,
                            'unit': self.uom,
                            'unit_price': 50,
                            'description': 'Gift Card',
                            'product': gift_card_product,
                        }, {
                            'type': 'comment',
                            'description': 'Test line',
                        }])
                    ]
                }])
                Sale.quote([sale])
                Sale.confirm([sale])
                with self.assertRaises(UserError):
                    Sale.process([sale])
                # gift card line amount > gc_max
                sale, = Sale.create([{
                    'reference': 'Sale1',
                    'sale_date': date.today(),
                    'invoice_address': self.party1.addresses[0].id,
                    'shipment_address': self.party1.addresses[0].id,
                    'party': self.party1.id,
                    'lines': [
                        ('create', [{
                            'type': 'line',
                            'quantity': 2,
                            'unit': self.uom,
                            'unit_price': 200,
                            'description': 'Test description1',
                            'product': self.product.id,
                        }, {
                            'quantity': 10,
                            'unit': self.uom,
                            'unit_price': 700,
                            'description': 'Gift Card',
                            'product': gift_card_product,
                        }, {
                            'type': 'comment',
                            'description': 'Test line',
                        }])
                    ]
                }])
                Sale.quote([sale])
                Sale.confirm([sale])
                with self.assertRaises(UserError):
                    Sale.process([sale])
                # gc_min <= gift card line amount <= gc_max
                sale, = Sale.create([{
                    'reference': 'Sale1',
                    'sale_date': date.today(),
                    'invoice_address': self.party1.addresses[0].id,
                    'shipment_address': self.party1.addresses[0].id,
                    'party': self.party1.id,
                    'lines': [
                        ('create', [{
                            'type': 'line',
                            'quantity': 2,
                            'unit': self.uom,
                            'unit_price': 200,
                            'description': 'Test description1',
                            'product': self.product.id,
                        }, {
                            'quantity': 3,
                            'unit': self.uom,
                            'unit_price': 500,
                            'description': 'Gift Card',
                            'product': gift_card_product,
                        }, {
                            'type': 'comment',
                            'description': 'Test line',
                        }])
                    ]
                }])
                gift_card_line, = SaleLine.search([
                    ('sale', '=', sale.id),
                    ('product', '=', gift_card_product.id),
                ])
                Sale.quote([sale])
                Sale.confirm([sale])
                # Cards are only created once the sale is processed.
                self.assertEqual(len(gift_card_line.gift_cards), 0)
                self.SalePayment.create([{
                    'sale': sale.id,
                    'amount': sale.total_amount,
                    'gateway': self.create_payment_gateway('manual'),
                    'credit_account': self.party1.account_receivable.id,
                }])
                Sale.process([sale])
                self.assertEqual(sale.state, 'processing')
                # One card per unit of quantity (3 here).
                self.assertEqual(len(gift_card_line.gift_cards), 3)
    def test0120_test_gift_card_prices(self):
        """
        Test gift card price

        A negative gift-card price must be rejected; a positive one is
        accepted.
        """
        GiftCardPrice = POOL.get('product.product.gift_card.price')
        with Transaction().start(DB_NAME, USER, context=CONTEXT):
            self.setup_defaults()
            gift_card_product = self.create_product(
                type='service', mode='virtual', is_gift_card=True
            )
            # Negative price is invalid.
            with self.assertRaises(UserError):
                GiftCardPrice.create([{
                    'product': gift_card_product,
                    'price': -90
                }])
            price, = GiftCardPrice.create([{
                'product': gift_card_product,
                'price': 90
            }])
            self.assert_(price)
    def test0130_test_on_change_gc_price(self):
        """
        Tests if unit price is changed with gift card price

        The on_change handler for gc_price must propose the selected
        gift-card price as the line's unit price.
        """
        SaleLine = POOL.get('sale.line')
        with Transaction().start(DB_NAME, USER, context=CONTEXT):
            self.setup_defaults()
            gift_card_product = self.create_product(
                type='service', mode='virtual', is_gift_card=True
            )
            gc_price, _, = gift_card_product.gift_card_prices
            # Unsaved line is enough: on_change works on in-memory values.
            sale_line = SaleLine(gc_price=gc_price)
            result = sale_line.on_change_gc_price()
            self.assertEqual(result['unit_price'], gc_price.price)
    def test0140_pay_manually(self):
        """
        Check authorized, captured and available amount for manual method

        Exercises both transaction flows on a manual gateway:
        authorize -> settle, and direct capture; each ends 'posted'.
        """
        PaymentTransaction = POOL.get('payment_gateway.transaction')
        with Transaction().start(DB_NAME, USER, context=CONTEXT):
            self.setup_defaults()
            with Transaction().set_context({'company': self.company.id}):
                gateway = self.create_payment_gateway(method='manual')
                # Authorise Payment transaction
                payment_transaction = PaymentTransaction(
                    description="Payment Transaction 1",
                    party=self.party1.id,
                    address=self.party1.addresses[0].id,
                    amount=Decimal('70'),
                    currency=self.company.currency.id,
                    gateway=gateway.id,
                    credit_account=self.party1.account_receivable.id,
                )
                payment_transaction.save()
                PaymentTransaction.authorize([payment_transaction])
                self.assertEqual(payment_transaction.state, 'authorized')
                # Settle Payment transactions
                PaymentTransaction.settle([payment_transaction])
                self.assertEqual(payment_transaction.state, 'posted')
                # Capture Payment transactions
                payment_transaction = PaymentTransaction(
                    description="Payment Transaction 1",
                    party=self.party1.id,
                    address=self.party1.addresses[0].id,
                    amount=Decimal('70'),
                    currency=self.company.currency.id,
                    gateway=gateway.id,
                    credit_account=self.party1.account_receivable.id,
                )
                payment_transaction.save()
                PaymentTransaction.capture([payment_transaction])
                self.assertEqual(payment_transaction.state, 'posted')
    def test0150_giftcard_redeem_wizard(self):
        """
        Tests the gift card redeem wizard.

        Covers: refusal to redeem a draft card, default_start values,
        successful partial redemption, over-redemption and negative amount
        errors, and exhaustion moving the card to 'used'.
        """
        GiftCard = POOL.get('gift_card.gift_card')
        GCRedeemWizard = POOL.get('gift_card.redeem.wizard', type='wizard')
        with Transaction().start(DB_NAME, USER, context=CONTEXT):
            self.setup_defaults()
            with Transaction().set_context({'company': self.company.id}):
                active_gift_card, = GiftCard.create([{
                    'amount': Decimal('150'),
                    'number': '45671338',
                    'state': 'draft',
                }])
                gateway = self.create_payment_gateway()
                with Transaction().set_context(
                    active_ids=[active_gift_card.id]
                ):
                    session_id, start_state, end_state = GCRedeemWizard.create()
                    data = {
                        start_state: {
                            'name': 'start',
                            'gateway': gateway.id,
                            'description': 'This is a description.',
                            'party': self.party1.id,
                            'address': self.party1.addresses[0].id,
                            'amount': Decimal('100'),
                            'gift_card': active_gift_card.id,
                            'currency': self.usd,
                        },
                    }
                    # Trying to redeem GC in draft state, error is thrown.
                    # Note that a domain error is thrown instead of
                    # check_giftcard_state() being called.
                    with self.assertRaises(UserError):
                        GCRedeemWizard.execute(session_id, data, 'redeem')
                    # Test check_giftcard_state(). Draft state error is thrown.
                    with self.assertRaises(UserError):
                        GCRedeemWizard(session_id).check_giftcard_state(
                            active_gift_card
                        )
                    GiftCard.activate([active_gift_card])
                    # Test the default_start() method.
                    values = GCRedeemWizard(session_id).default_start({})
                    self.assertEqual(values['gift_card'], active_gift_card.id)
                    self.assertEqual(values['gateway'], gateway.id)
                    # Now execute the wizard properly.
                    GCRedeemWizard.execute(session_id, data, 'redeem')
                    self.assertEqual(active_gift_card.state, 'active')
                    self.assertEqual(
                        active_gift_card.amount_captured,
                        data[start_state]['amount']
                    )
                    data[start_state]['amount'] = Decimal('70')
                    # Error thrown because amount available is just 50.
                    with self.assertRaises(UserError):
                        GCRedeemWizard.execute(session_id, data, 'redeem')
                    data[start_state]['amount'] = Decimal('-70')
                    # Error thrown because amount is negative.
                    with self.assertRaises(UserError):
                        GCRedeemWizard.execute(session_id, data, 'redeem')
                    data[start_state]['amount'] = Decimal('50')
                    GCRedeemWizard.execute(session_id, data, 'redeem')
                    # Fully redeemed: card is consumed.
                    self.assertEqual(active_gift_card.state, 'used')
                    self.assertEqual(
                        active_gift_card.amount_available, Decimal('0')
                    )
                    # Now the gift card has already been used, cannot run
                    # wizard on it once again.
                    with self.assertRaises(UserError):
                        GCRedeemWizard(session_id).check_giftcard_state(
                            active_gift_card
                        )
def test0200_test_sale_payment_wizard_for_gift_card(self):
"""
Test the wizard to create sale payment for gift card
"""
PaymentWizard = POOL.get('sale.payment.add', type="wizard")
GiftCard = POOL.get('gift_card.gift_card')
with Transaction().start(DB_NAME, USER, context=CONTEXT):
self.setup_defaults()
# Active gift card
active_gift_card, = GiftCard.create([{
'number': 'A1234',
'amount': Decimal('100'),
'currency': self.company.currency.id,
'state': 'active'
}])
# Inactive gift card
inactive_gift_card, = GiftCard.create([{
'number': 'A1567',
'amount': Decimal('50'),
'currency': self.company.currency.id,
'state': 'used'
}])
sale, = self.Sale.create([{
'reference': 'Test Sale',
'currency': self.company.currency.id,
'party': self.party1.id,
'sale_date': date.today(),
'company': self.company.id,
'invoice_address': self.party1.addresses[0].id,
'shipment_address': self.party1.addresses[0].id,
}])
sale_line, = self.SaleLine.create([{
'sale': sale,
'type': 'line',
'quantity': 2,
'unit': self.uom,
'unit_price': 20000,
'description': 'Test description',
'product': self.product.id
}])
payment_wizard = PaymentWizard(PaymentWizard.create()[0])
gift_card_gateway = self.create_payment_gateway()
payment_wizard.payment_info.gateway = gift_card_gateway.id
payment_wizard.payment_info.method = gift_card_gateway.method
payment_wizard.payment_info.amount = 200
payment_wizard.payment_info.payment_profile = None
payment_wizard.payment_info.party = sale.party.id
payment_wizard.payment_info.sale = sale.id
payment_wizard.payment_info.reference = 'ref1'
payment_wizard.payment_info.credit_account = \
sale.party.account_receivable.id
payment_wizard.payment_info.gift_card = active_gift_card.id
payment_wizard.payment_info.amount = 50
self.assertEqual(active_gift_card.amount_available, Decimal('100'))
with Transaction().set_context(active_id=sale.id):
payment_wizard.transition_add()
self.assertTrue(len(sale.payments), 1)
payment, = sale.payments
self.assertEqual(payment.amount, Decimal('50'))
self.assertEqual(payment.method, gift_card_gateway.method)
self.assertEqual(payment.provider, gift_card_gateway.provider)
self.assertEqual(payment.gift_card, active_gift_card)
    def test0210_partial_payment_using_gift_card(self):
        """
        Check partial payment using cash, credit card and gift card

        Splits a $100 sale across three gateways ($20 gift card, $30 cash,
        $50 credit card) and verifies the payment totals before and after
        processing.
        """
        GiftCard = POOL.get('gift_card.gift_card')
        Configuration = POOL.get('gift_card.configuration')
        with Transaction().start(DB_NAME, USER, context=CONTEXT):
            self.setup_defaults()
            Configuration.create([{
                'liability_account': self._get_account_by_kind('revenue').id
            }])
            # Create Active gift card
            active_gift_card, = GiftCard.create([{
                'number': 'A1234',
                'amount': Decimal('100'),
                'currency': self.company.currency.id,
                'state': 'active'
            }])
            sale, = self.Sale.create([{
                'reference': 'Test Sale',
                'currency': self.company.currency.id,
                'party': self.party1.id,
                'invoice_address': self.party1.addresses[0].id,
                'shipment_address': self.party1.addresses[0].id,
                'company': self.company.id,
                'invoice_method': 'manual',
                'shipment_method': 'manual',
                'lines': [('create', [{
                    'description': 'Some item',
                    'unit_price': Decimal('100'),
                    'quantity': 1
                }])]
            }])
            with Transaction().set_context(use_dummy=True):
                # Create gateways
                dummy_gateway = self.create_payment_gateway(
                    method='credit_card', provider='dummy'
                )
                cash_gateway = self.create_payment_gateway(
                    method='manual', provider='self'
                )
                gift_card_gateway = self.create_payment_gateway(
                    method='gift_card', provider='self'
                )
                # Create a payment profile
                payment_profile = self.create_payment_profile(
                    self.party1, dummy_gateway
                )
                # Create sale payment for $30 in cash and $50 in card and $20
                # in gift card
                payment_gift_card, payment_cash, payment_credit_card = \
                    self.SalePayment.create([{
                        'sale': sale.id,
                        'amount': Decimal('20'),
                        'gateway': gift_card_gateway,
                        'gift_card': active_gift_card.id,
                        'credit_account': self.party1.account_receivable.id,
                    }, {
                        'sale': sale.id,
                        'amount': Decimal('30'),
                        'gateway': cash_gateway,
                        'credit_account': self.party1.account_receivable.id,
                    }, {
                        'sale': sale.id,
                        'amount': Decimal('50'),
                        'payment_profile': payment_profile.id,
                        'gateway': dummy_gateway,
                        'credit_account': self.party1.account_receivable.id,
                    }])
                # Each payment gets a method-specific description.
                self.assertTrue(
                    payment_credit_card.description.startswith("Paid by Card"))
                self.assertTrue(
                    payment_cash.description.startswith("Paid by Cash"))
                self.assertTrue(
                    payment_gift_card.description.startswith("Paid by Gift"))
                # Before processing: everything still 'available'.
                self.assertEqual(sale.total_amount, Decimal('100'))
                self.assertEqual(sale.payment_total, Decimal('100'))
                self.assertEqual(sale.payment_available, Decimal('100'))
                self.assertEqual(sale.payment_collected, Decimal('0'))
                self.assertEqual(sale.payment_captured, Decimal('0'))
                self.assertEqual(sale.payment_authorized, Decimal('0'))
                self.Sale.quote([sale])
                self.Sale.confirm([sale])
                with Transaction().set_context({'company': self.company.id}):
                    self.Sale.proceed([sale])
                    sale.process_pending_payments()
                self.assertEqual(sale.state, 'processing')
                self.assertEqual(len(sale.gateway_transactions), 3)
                # After processing: all three payments captured.
                self.assertEqual(sale.total_amount, Decimal('100'))
                self.assertEqual(sale.payment_total, Decimal('100'))
                self.assertEqual(sale.payment_available, Decimal('0'))
                self.assertEqual(sale.payment_collected, Decimal('100'))
                self.assertEqual(sale.payment_captured, Decimal('100'))
                self.assertEqual(sale.payment_authorized, Decimal('0'))
    def test3000_gift_card_method(self):
        """
        Check if gift card is being created according to gift card method

        With the default method, cards are created (idempotently) when the
        sale is processed; with method 'invoice', cards are only created
        once the invoice is posted and paid.
        """
        Sale = POOL.get('sale.sale')
        GiftCard = POOL.get('gift_card.gift_card')
        Invoice = POOL.get('account.invoice')
        Configuration = POOL.get('gift_card.configuration')
        with Transaction().start(DB_NAME, USER, context=CONTEXT):
            self.setup_defaults()
            gift_card_product1 = self.create_product(is_gift_card=True)
            gift_card_product2 = self.create_product(is_gift_card=True)
            with Transaction().set_context({'company': self.company.id}):
                Configuration.create([{
                    'liability_account': self._get_account_by_kind('revenue').id
                }])
                gc_price1, _, = gift_card_product1.gift_card_prices
                gc_price2, _, = gift_card_product2.gift_card_prices
                sale, = Sale.create([{
                    'reference': 'Sale1',
                    'sale_date': date.today(),
                    'invoice_address': self.party1.addresses[0].id,
                    'shipment_address': self.party1.addresses[0].id,
                    'party': self.party1.id,
                    'lines': [
                        ('create', [{
                            'quantity': 1,
                            'unit': self.uom,
                            'unit_price': 500,
                            'description': 'Gift Card 1',
                            'product': gift_card_product1,
                            'gc_price': gc_price1
                        }, {
                            'quantity': 1,
                            'unit': self.uom,
                            'unit_price': 500,
                            'description': 'Gift Card 2',
                            'product': gift_card_product2,
                            'gc_price': gc_price2,
                        }, {
                            'type': 'comment',
                            'description': 'Test line',
                        }])
                    ]
                }])
                Sale.quote([sale])
                Sale.confirm([sale])
                self.SalePayment.create([{
                    'sale': sale.id,
                    'amount': Decimal('1000'),
                    'gateway': self.create_payment_gateway('manual'),
                    'credit_account': self.party1.account_receivable.id,
                }])
                Sale.process([sale])
                # Two giftcards should have been created and activated
                self.assertEqual(
                    GiftCard.search([('state', '=', 'active')], count=True),
                    2
                )
                # Trigger sale process again
                Sale.process([sale])
                # No new giftcards should have been created
                self.assertEqual(
                    GiftCard.search([('state', '=', 'active')], count=True),
                    2
                )
                # Now re-do sale with invoice payment
                config = self.SaleConfig(1)
                config.gift_card_method = 'invoice'
                config.save()
                sale, = Sale.create([{
                    'reference': 'Sale1',
                    'sale_date': date.today(),
                    'invoice_address': self.party1.addresses[0].id,
                    'shipment_address': self.party1.addresses[0].id,
                    'party': self.party1.id,
                    'lines': [
                        ('create', [{
                            'quantity': 1,
                            'unit': self.uom,
                            'unit_price': 500,
                            'description': 'Gift Card 1',
                            'product': gift_card_product1,
                            'gc_price': gc_price1
                        }, {
                            'quantity': 1,
                            'unit': self.uom,
                            'unit_price': 500,
                            'description': 'Gift Card 2',
                            'product': gift_card_product2,
                            'gc_price': gc_price2,
                        }, {
                            'type': 'comment',
                            'description': 'Test line',
                        }])
                    ]
                }])
                Sale.quote([sale])
                Sale.confirm([sale])
                payment, = self.SalePayment.create([{
                    'sale': sale.id,
                    'amount': Decimal('1000'),
                    'gateway': self.create_payment_gateway('manual'),
                    'credit_account': self.party1.account_receivable.id,
                }])
                Sale.process([sale])
                # No new giftcards
                self.assertEqual(
                    GiftCard.search([('state', '=', 'active')], count=True),
                    2
                )
                # Post and pay the invoice
                invoice, = sale.invoices
                Invoice.post([invoice])
                invoice.pay_invoice(
                    invoice.total_amount,
                    self.cash_journal,
                    invoice.invoice_date,
                    'Payment to make invoice paid - obviously!'
                )
                Invoice.paid([invoice])
                # New giftcards
                self.assertEqual(
                    GiftCard.search([('state', '=', 'active')], count=True),
                    4
                )
def suite():
    """Build the Tryton test suite including the gift-card test case."""
    full_suite = trytond.tests.test_tryton.suite()
    loader = unittest.TestLoader()
    full_suite.addTests(loader.loadTestsFromTestCase(TestGiftCard))
    return full_suite
if __name__ == '__main__':
    # Run the full suite with verbose output when executed as a script.
    unittest.TextTestRunner(verbosity=2).run(suite())
| |
# -*- coding: utf-8 -*-
"""
sphinx.builders.html
~~~~~~~~~~~~~~~~~~~~
Several HTML builders.
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import sys
import zlib
import codecs
import posixpath
import cPickle as pickle
from os import path
try:
from hashlib import md5
except ImportError:
# 2.4 compatibility
from md5 import md5
from docutils import nodes
from docutils.io import DocTreeInput, StringOutput
from docutils.core import Publisher
from docutils.utils import new_document
from docutils.frontend import OptionParser
from docutils.readers.doctree import Reader as DoctreeReader
from sphinx import package_dir, __version__
from sphinx.util import jsonimpl, copy_static_entry
from sphinx.util.osutil import SEP, os_path, relative_uri, ensuredir, \
movefile, ustrftime, copyfile
from sphinx.util.nodes import inline_all_toctrees
from sphinx.util.matching import patmatch, compile_matchers
from sphinx.util.pycompat import any, b
from sphinx.errors import SphinxError
from sphinx.locale import _
from sphinx.search import js_index
from sphinx.theming import Theme
from sphinx.builders import Builder
from sphinx.application import ENV_PICKLE_FILENAME
from sphinx.highlighting import PygmentsBridge
from sphinx.util.console import bold, darkgreen, brown
from sphinx.writers.html import HTMLWriter, HTMLTranslator, \
SmartyPantsHTMLTranslator
#: the filename for the inventory of objects
INVENTORY_FILENAME = 'objects.inv'
#: the filename for the "last build" file (for serializing builders)
LAST_BUILD_FILENAME = 'last_build'
def get_stable_hash(obj):
    """
    Return a stable hash for a Python data structure. We can't just use
    the md5 of str(obj) since for example dictionary items are enumerated
    in unpredictable order due to hash randomization in newer Pythons.
    """
    # Normalize containers: dicts become item lists, and any sequence is
    # replaced by the sorted hashes of its elements, so ordering is stable.
    if isinstance(obj, dict):
        return get_stable_hash(list(obj.items()))
    if isinstance(obj, (list, tuple)):
        obj = sorted(get_stable_hash(item) for item in obj)
    return md5(unicode(obj).encode('utf8')).hexdigest()
class StandaloneHTMLBuilder(Builder):
"""
Builds standalone HTML docs.
"""
name = 'html'
format = 'html'
copysource = True
allow_parallel = True
out_suffix = '.html'
link_suffix = '.html' # defaults to matching out_suffix
indexer_format = js_index
indexer_dumps_unicode = True
supported_image_types = ['image/svg+xml', 'image/png',
'image/gif', 'image/jpeg']
searchindex_filename = 'searchindex.js'
add_permalinks = True
embedded = False # for things like HTML help or Qt help: suppresses sidebar
# This is a class attribute because it is mutated by Sphinx.add_javascript.
script_files = ['_static/jquery.js', '_static/underscore.js',
'_static/doctools.js']
# Dito for this one.
css_files = []
default_sidebars = ['localtoc.html', 'relations.html',
'sourcelink.html', 'searchbox.html']
# cached publisher object for snippets
_publisher = None
    def init(self):
        """Initialize per-build state from the configuration."""
        # a hash of all config values that, if changed, cause a full rebuild
        self.config_hash = ''
        self.tags_hash = ''
        # section numbers for headings in the currently visited document
        self.secnumbers = {}
        # currently written docname
        self.current_docname = None
        self.init_templates()
        self.init_highlighter()
        self.init_translator_class()
        # html_file_suffix/html_link_suffix override the class defaults;
        # the link suffix falls back to the output suffix.
        if self.config.html_file_suffix is not None:
            self.out_suffix = self.config.html_file_suffix
        if self.config.html_link_suffix is not None:
            self.link_suffix = self.config.html_link_suffix
        else:
            self.link_suffix = self.out_suffix
        if self.config.language is not None:
            if self._get_translations_js():
                # NOTE(review): script_files is a class attribute (see class
                # body), so this append mutates shared state — confirm that
                # is intended before refactoring.
                self.script_files.append('_static/translations.js')
def _get_translations_js(self):
candidates = [path.join(package_dir, 'locale', self.config.language,
'LC_MESSAGES', 'sphinx.js'),
path.join(sys.prefix, 'share/sphinx/locale',
self.config.language, 'sphinx.js')] + \
[path.join(dir, self.config.language,
'LC_MESSAGES', 'sphinx.js')
for dir in self.config.locale_dirs]
for jsfile in candidates:
if path.isfile(jsfile):
return jsfile
return None
def get_theme_config(self):
return self.config.html_theme, self.config.html_theme_options
def init_templates(self):
Theme.init_themes(self.confdir, self.config.html_theme_path,
warn=self.warn)
themename, themeoptions = self.get_theme_config()
self.theme = Theme(themename)
self.theme_options = themeoptions.copy()
self.create_template_bridge()
self.templates.init(self, self.theme)
def init_highlighter(self):
# determine Pygments style and create the highlighter
if self.config.pygments_style is not None:
style = self.config.pygments_style
elif self.theme:
style = self.theme.get_confstr('theme', 'pygments_style', 'none')
else:
style = 'sphinx'
self.highlighter = PygmentsBridge('html', style,
self.config.trim_doctest_flags)
def init_translator_class(self):
if self.config.html_translator_class:
self.translator_class = self.app.import_object(
self.config.html_translator_class,
'html_translator_class setting')
elif self.config.html_use_smartypants:
self.translator_class = SmartyPantsHTMLTranslator
else:
self.translator_class = HTMLTranslator
    def get_outdated_docs(self):
        """Yield docnames that need rebuilding.

        A full rebuild (all found docs) is triggered when the hash of
        html-relevant config values or of the active tags differs from the
        values recorded in the ``.buildinfo`` file; otherwise only docs
        newer than their output (or newer than the newest template) are
        yielded.
        """
        # Hash only the config values marked as affecting html output.
        cfgdict = dict((name, self.config[name])
                       for (name, desc) in self.config.values.iteritems()
                       if desc[1] == 'html')
        self.config_hash = get_stable_hash(cfgdict)
        self.tags_hash = get_stable_hash(sorted(self.tags))
        old_config_hash = old_tags_hash = ''
        try:
            fp = open(path.join(self.outdir, '.buildinfo'))
            try:
                # .buildinfo format: version line, comment line, then
                # 'config: <hash>' and 'tags: <hash>' lines.
                version = fp.readline()
                if version.rstrip() != '# Sphinx build info version 1':
                    raise ValueError
                fp.readline()  # skip commentary
                cfg, old_config_hash = fp.readline().strip().split(': ')
                if cfg != 'config':
                    raise ValueError
                tag, old_tags_hash = fp.readline().strip().split(': ')
                if tag != 'tags':
                    raise ValueError
            finally:
                fp.close()
        except ValueError:
            self.warn('unsupported build info format in %r, building all' %
                      path.join(self.outdir, '.buildinfo'))
        except Exception:
            # Missing/unreadable .buildinfo: hashes stay '', which forces a
            # full rebuild below.
            pass
        if old_config_hash != self.config_hash or \
               old_tags_hash != self.tags_hash:
            for docname in self.env.found_docs:
                yield docname
            return
        if self.templates:
            template_mtime = self.templates.newest_template_mtime()
        else:
            template_mtime = 0
        for docname in self.env.found_docs:
            if docname not in self.env.all_docs:
                # Never built before.
                yield docname
                continue
            targetname = self.get_outfilename(docname)
            try:
                targetmtime = path.getmtime(targetname)
            except Exception:
                targetmtime = 0
            try:
                # A changed template makes every doc outdated too.
                srcmtime = max(path.getmtime(self.env.doc2path(docname)),
                               template_mtime)
                if srcmtime > targetmtime:
                    yield docname
            except EnvironmentError:
                # source doesn't exist anymore
                pass
    def render_partial(self, node):
        """Utility: Render a lone doctree node.

        Wraps *node* in a fresh document and runs it through a cached
        docutils Publisher with the HTML writer; returns the writer's
        ``parts`` dict (an empty 'fragment' when node is None).
        """
        if node is None:
            return {'fragment': ''}
        doc = new_document(b('<partial node>'))
        doc.append(node)
        # The Publisher is created once and reused; only reader/writer and
        # settings are (re)assigned per call.
        if self._publisher is None:
            self._publisher = Publisher(
                    source_class = DocTreeInput,
                    destination_class=StringOutput)
            self._publisher.set_components('standalone',
                                           'restructuredtext', 'pseudoxml')
        pub = self._publisher
        pub.reader = DoctreeReader()
        pub.writer = HTMLWriter(self)
        pub.process_programmatic_settings(
            None, {'output_encoding': 'unicode'}, None)
        pub.set_source(doc, None)
        pub.set_destination(None, None)
        pub.publish()
        return pub.writer.parts
def prepare_writing(self, docnames):
    """Set up everything needed before writing any page: the search
    indexer, the docutils writer/settings, domain indices, and the
    global template context (``self.globalcontext``).
    """
    # create the search indexer
    from sphinx.search import IndexBuilder, languages
    lang = self.config.html_search_language or self.config.language
    if not lang or lang not in languages:
        lang = 'en'
    self.indexer = IndexBuilder(self.env, lang,
                                self.config.html_search_options,
                                self.config.html_search_scorer)
    self.load_indexer(docnames)
    self.docwriter = HTMLWriter(self)
    self.docsettings = OptionParser(
        defaults=self.env.settings,
        components=(self.docwriter,),
        read_config_files=True).get_default_values()
    self.docsettings.compact_lists = bool(self.config.html_compact_lists)

    # determine the additional indices to include
    self.domain_indices = []
    # html_domain_indices can be False/True or a list of index names
    indices_config = self.config.html_domain_indices
    if indices_config:
        for domain in self.env.domains.itervalues():
            for indexcls in domain.indices:
                indexname = '%s-%s' % (domain.name, indexcls.name)
                if isinstance(indices_config, list):
                    if indexname not in indices_config:
                        continue
                # deprecated config value
                if indexname == 'py-modindex' and \
                        not self.config.html_use_modindex:
                    continue
                content, collapse = indexcls(domain).generate()
                if content:
                    self.domain_indices.append(
                        (indexname, indexcls, content, collapse))

    # format the "last updated on" string, only once is enough since it
    # typically doesn't include the time of day
    lufmt = self.config.html_last_updated_fmt
    if lufmt is not None:
        self.last_updated = ustrftime(lufmt or _('%b %d, %Y'))
    else:
        self.last_updated = None

    logo = self.config.html_logo and \
        path.basename(self.config.html_logo) or ''
    favicon = self.config.html_favicon and \
        path.basename(self.config.html_favicon) or ''
    if favicon and os.path.splitext(favicon)[1] != '.ico':
        self.warn('html_favicon is not an .ico file')

    if not isinstance(self.config.html_use_opensearch, basestring):
        self.warn('html_use_opensearch config value must now be a string')

    self.relations = self.env.collect_relations()

    rellinks = []
    if self.get_builder_config('use_index', 'html'):
        rellinks.append(('genindex', _('General Index'), 'I', _('index')))
    for indexname, indexcls, content, collapse in self.domain_indices:
        # if it has a short name
        if indexcls.shortname:
            rellinks.append((indexname, indexcls.localname,
                             '', indexcls.shortname))

    # stylesheet: explicit config wins, then theme, then built-in default
    if self.config.html_style is not None:
        stylename = self.config.html_style
    elif self.theme:
        stylename = self.theme.get_confstr('theme', 'stylesheet')
    else:
        stylename = 'default.css'

    self.globalcontext = dict(
        embedded=self.embedded,
        project=self.config.project,
        release=self.config.release,
        version=self.config.version,
        last_updated=self.last_updated,
        copyright=self.config.copyright,
        master_doc=self.config.master_doc,
        use_opensearch=self.config.html_use_opensearch,
        docstitle=self.config.html_title,
        shorttitle=self.config.html_short_title,
        show_copyright=self.config.html_show_copyright,
        show_sphinx=self.config.html_show_sphinx,
        has_source=self.config.html_copy_source,
        show_source=self.config.html_show_sourcelink,
        file_suffix=self.out_suffix,
        script_files=self.script_files,
        css_files=self.css_files,
        sphinx_version=__version__,
        style=stylename,
        rellinks=rellinks,
        builder=self.name,
        parents=[],
        logo=logo,
        favicon=favicon,
    )
    if self.theme:
        self.globalcontext.update(
            ('theme_' + key, val) for (key, val) in
            self.theme.get_options(self.theme_options).iteritems())
    self.globalcontext.update(self.config.html_context)
def get_doc_context(self, docname, body, metatags):
    """Collect items for the template context of a page."""
    # find out relations
    prev = next = None
    parents = []
    rellinks = self.globalcontext['rellinks'][:]
    related = self.relations.get(docname)
    titles = self.env.titles
    if related and related[2]:
        try:
            next = {
                'link': self.get_relative_uri(docname, related[2]),
                'title': self.render_partial(titles[related[2]])['title']
            }
            rellinks.append((related[2], next['title'], 'N', _('next')))
        except KeyError:
            next = None
    if related and related[1]:
        try:
            prev = {
                'link': self.get_relative_uri(docname, related[1]),
                'title': self.render_partial(titles[related[1]])['title']
            }
            rellinks.append((related[1], prev['title'], 'P', _('previous')))
        except KeyError:
            # the relation is (somehow) not in the TOC tree, handle
            # that gracefully
            prev = None
    while related and related[0]:
        try:
            parents.append(
                {'link': self.get_relative_uri(docname, related[0]),
                 'title': self.render_partial(titles[related[0]])['title']})
        except KeyError:
            pass
        related = self.relations.get(related[0])
    if parents:
        parents.pop()  # remove link to the master file; we have a generic
                       # "back to index" link already
    parents.reverse()

    # title rendered as HTML
    title = self.env.longtitles.get(docname)
    title = title and self.render_partial(title)['title'] or ''
    # the name for the copied source
    sourcename = self.config.html_copy_source and docname + '.txt' or ''
    # metadata for the document
    meta = self.env.metadata.get(docname)

    # local TOC and global TOC tree
    self_toc = self.env.get_toc_for(docname, self)
    toc = self.render_partial(self_toc)['fragment']

    return dict(
        parents=parents,
        prev=prev,
        next=next,
        title=title,
        meta=meta,
        body=body,
        metatags=metatags,
        rellinks=rellinks,
        sourcename=sourcename,
        toc=toc,
        # only display a TOC if there's more than one item to show
        display_toc=(self.env.toc_num_entries[docname] > 1),
    )
def write_doc(self, docname, doctree):
    """Render a single resolved doctree to HTML and hand it to
    :meth:`handle_page` for templating and output.
    """
    destination = StringOutput(encoding='utf-8')
    doctree.settings = self.docsettings

    self.secnumbers = self.env.toc_secnumbers.get(docname, {})
    # image/download paths are relative to the page being written
    self.imgpath = relative_uri(self.get_target_uri(docname), '_images')
    self.dlpath = relative_uri(self.get_target_uri(docname), '_downloads')
    self.current_docname = docname
    self.docwriter.write(doctree, destination)
    self.docwriter.assemble_parts()
    body = self.docwriter.parts['fragment']
    metatags = self.docwriter.clean_meta

    ctx = self.get_doc_context(docname, body, metatags)
    self.handle_page(docname, ctx, event_arg=doctree)
def write_doc_serialized(self, docname, doctree):
    """Pre-write hook: fix up image references and feed the page to the
    search indexer before the doctree is mutated by writing.
    """
    self.imgpath = relative_uri(self.get_target_uri(docname), '_images')
    self.post_process_images(doctree)
    title = self.env.longtitles.get(docname)
    title = title and self.render_partial(title)['title'] or ''
    self.index_page(docname, doctree, title)
def finish(self):
    """Write all non-document output: extension pages, indices, the search
    page, additional pages, opensearch description, static/extra files,
    build info, and finally the search index.
    """
    self.info(bold('writing additional files...'), nonl=1)

    # pages from extensions
    for pagelist in self.app.emit('html-collect-pages'):
        for pagename, context, template in pagelist:
            self.handle_page(pagename, context, template)

    # the global general index
    if self.get_builder_config('use_index', 'html'):
        self.write_genindex()

    # the global domain-specific indices
    self.write_domain_indices()

    # the search page
    if self.name != 'htmlhelp':
        self.info(' search', nonl=1)
        self.handle_page('search', {}, 'search.html')

    # additional pages from conf.py
    for pagename, template in self.config.html_additional_pages.items():
        self.info(' '+pagename, nonl=1)
        self.handle_page(pagename, {}, template)

    if self.config.html_use_opensearch and self.name != 'htmlhelp':
        self.info(' opensearch', nonl=1)
        fn = path.join(self.outdir, '_static', 'opensearch.xml')
        self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=fn)

    self.info()

    self.copy_image_files()
    self.copy_download_files()
    self.copy_static_files()
    self.copy_extra_files()
    self.write_buildinfo()

    # dump the search index
    self.handle_finish()
def write_genindex(self):
    """Write the general index, either as one page or split per letter
    when ``html_split_index`` is enabled.
    """
    # the total count of lines for each index letter, used to distribute
    # the entries into two columns
    genindex = self.env.create_index(self)
    indexcounts = []
    # NOTE: loop variables renamed from the original bare '_' so they do
    # not shadow the gettext helper '_' used elsewhere in this class
    for _letter, entries in genindex:
        indexcounts.append(sum(1 + len(subitems)
                               for _ename, (_elinks, subitems) in entries))

    genindexcontext = dict(
        genindexentries=genindex,
        genindexcounts=indexcounts,
        split_index=self.config.html_split_index,
    )
    self.info(' genindex', nonl=1)

    if self.config.html_split_index:
        self.handle_page('genindex', genindexcontext,
                         'genindex-split.html')
        self.handle_page('genindex-all', genindexcontext,
                         'genindex.html')
        for (key, entries), count in zip(genindex, indexcounts):
            ctx = {'key': key, 'entries': entries, 'count': count,
                   'genindexentries': genindex}
            self.handle_page('genindex-' + key, ctx,
                             'genindex-single.html')
    else:
        self.handle_page('genindex', genindexcontext, 'genindex.html')
def write_domain_indices(self):
    """Write one page per domain-specific index collected in
    :meth:`prepare_writing`.
    """
    for indexname, indexcls, content, collapse in self.domain_indices:
        indexcontext = dict(
            indextitle=indexcls.localname,
            content=content,
            collapse_index=collapse,
        )
        self.info(' ' + indexname, nonl=1)
        self.handle_page(indexname, indexcontext, 'domainindex.html')
def copy_image_files(self):
# copy image files
if self.images:
ensuredir(path.join(self.outdir, '_images'))
for src in self.status_iterator(self.images, 'copying images... ',
brown, len(self.images)):
dest = self.images[src]
try:
copyfile(path.join(self.srcdir, src),
path.join(self.outdir, '_images', dest))
except Exception, err:
self.warn('cannot copy image file %r: %s' %
(path.join(self.srcdir, src), err))
def copy_download_files(self):
# copy downloadable files
if self.env.dlfiles:
ensuredir(path.join(self.outdir, '_downloads'))
for src in self.status_iterator(self.env.dlfiles,
'copying downloadable files... ',
brown, len(self.env.dlfiles)):
dest = self.env.dlfiles[src][1]
try:
copyfile(path.join(self.srcdir, src),
path.join(self.outdir, '_downloads', dest))
except Exception, err:
self.warn('cannot copy downloadable file %r: %s' %
(path.join(self.srcdir, src), err))
def copy_static_files(self):
    """Populate ``<outdir>/_static``: pygments stylesheet, translations,
    theme static files, user static files, logo and favicon.
    """
    self.info(bold('copying static files... '), nonl=True)
    ensuredir(path.join(self.outdir, '_static'))
    # first, create pygments style file; close the handle even if the
    # write fails
    f = open(path.join(self.outdir, '_static', 'pygments.css'), 'w')
    try:
        f.write(self.highlighter.get_stylesheet())
    finally:
        f.close()
    # then, copy translations JavaScript file
    if self.config.language is not None:
        jsfile = self._get_translations_js()
        if jsfile:
            copyfile(jsfile, path.join(self.outdir, '_static',
                                       'translations.js'))
    # add context items for search function used in searchtools.js_t
    ctx = self.globalcontext.copy()
    ctx.update(self.indexer.context_for_searchtool())
    # then, copy over theme-supplied static files
    if self.theme:
        themeentries = [path.join(themepath, 'static')
                        for themepath in self.theme.get_dirchain()[::-1]]
        for entry in themeentries:
            copy_static_entry(entry, path.join(self.outdir, '_static'),
                              self, ctx)
    # then, copy over all user-supplied static files
    staticentries = [path.join(self.confdir, spath)
                     for spath in self.config.html_static_path]
    matchers = compile_matchers(
        self.config.exclude_patterns +
        ['**/' + d for d in self.config.exclude_dirnames]
    )
    for entry in staticentries:
        if not path.exists(entry):
            self.warn('html_static_path entry %r does not exist' % entry)
            continue
        copy_static_entry(entry, path.join(self.outdir, '_static'), self,
                          ctx, exclude_matchers=matchers)
    # copy logo and favicon files if not already in static path
    if self.config.html_logo:
        logobase = path.basename(self.config.html_logo)
        logotarget = path.join(self.outdir, '_static', logobase)
        # BUGFIX: the existence check must look at the configured source
        # file (relative to confdir), not the bare basename relative to
        # the current working directory
        if not path.isfile(path.join(self.confdir, self.config.html_logo)):
            self.warn('logo file %r does not exist' % logobase)
        elif not path.isfile(logotarget):
            copyfile(path.join(self.confdir, self.config.html_logo),
                     logotarget)
    if self.config.html_favicon:
        iconbase = path.basename(self.config.html_favicon)
        icontarget = path.join(self.outdir, '_static', iconbase)
        if not path.isfile(icontarget):
            copyfile(path.join(self.confdir, self.config.html_favicon),
                     icontarget)
    self.info('done')
def copy_extra_files(self):
    """Copy ``html_extra_path`` entries directly into the output dir."""
    self.info(bold('copying extra files... '), nonl=True)
    extraentries = [path.join(self.confdir, epath)
                    for epath in self.config.html_extra_path]
    for entry in extraentries:
        if not path.exists(entry):
            self.warn('html_extra_path entry %r does not exist' % entry)
            continue
        copy_static_entry(entry, self.outdir, self)
    self.info('done')
def write_buildinfo(self):
    """Write the ``.buildinfo`` file recording config and tags hashes,
    used by :meth:`get_outdated_docs` to decide on full rebuilds.
    """
    fp = open(path.join(self.outdir, '.buildinfo'), 'w')
    try:
        fp.write('# Sphinx build info version 1\n'
                 '# This file hashes the configuration used when building'
                 ' these files. When it is not found, a full rebuild will'
                 ' be done.\nconfig: %s\ntags: %s\n' %
                 (self.config_hash, self.tags_hash))
    finally:
        fp.close()
def cleanup(self):
    """Clean up theme resources, if a theme is in use."""
    if self.theme:
        self.theme.cleanup()
def post_process_images(self, doctree):
    """Pick the best candidate for an image and link down-scaled images to
    their high res version.
    """
    Builder.post_process_images(self, doctree)
    for node in doctree.traverse(nodes.image):
        scale_keys = ('scale', 'width', 'height')
        if not any((key in node) for key in scale_keys) or \
                isinstance(node.parent, nodes.reference):
            # docutils does unfortunately not preserve the
            # ``target`` attribute on images, so we need to check
            # the parent node here.
            continue
        uri = node['uri']
        reference = nodes.reference('', '', internal=True)
        if uri in self.images:
            reference['refuri'] = posixpath.join(self.imgpath,
                                                 self.images[uri])
        else:
            reference['refuri'] = uri
        # wrap the scaled image in a reference to the full-size version
        node.replace_self(reference)
        reference.append(node)
def load_indexer(self, docnames):
    """Load a previously dumped search index and prune entries for the
    documents about to be rebuilt.  A missing/corrupt index is only a
    warning when an incremental build would leave it incomplete.
    """
    keep = set(self.env.all_docs) - set(docnames)
    try:
        searchindexfn = path.join(self.outdir, self.searchindex_filename)
        if self.indexer_dumps_unicode:
            f = codecs.open(searchindexfn, 'r', encoding='utf-8')
        else:
            f = open(searchindexfn, 'rb')
        try:
            self.indexer.load(f, self.indexer_format)
        finally:
            f.close()
    except (IOError, OSError, ValueError):
        if keep:
            self.warn('search index couldn\'t be loaded, but not all '
                      'documents will be built: the index will be '
                      'incomplete.')
    # delete all entries for files that will be rebuilt
    self.indexer.prune(keep)
def index_page(self, pagename, doctree, title):
# only index pages with title
if self.indexer is not None and title:
self.indexer.feed(pagename, title, doctree)
def _get_local_toctree(self, docname, collapse=True, **kwds):
    """Render the local toctree of *docname* as an HTML fragment."""
    if 'includehidden' not in kwds:
        kwds['includehidden'] = False
    return self.render_partial(self.env.get_toctree_for(
        docname, self, collapse, **kwds))['fragment']
def get_outfilename(self, pagename):
    """Return the output file path for *pagename* inside ``outdir``."""
    return path.join(self.outdir, os_path(pagename) + self.out_suffix)
def add_sidebars(self, pagename, ctx):
    """Fill ``ctx['sidebars']``/``ctx['customsidebar']`` from the
    ``html_sidebars`` config, using the most specific matching pattern.
    """
    def has_wildcard(pattern):
        return any(char in pattern for char in '*?[')

    sidebars = None
    matched = None
    customsidebar = None
    for pattern, patsidebars in self.config.html_sidebars.iteritems():
        if patmatch(pagename, pattern):
            if matched:
                if has_wildcard(pattern):
                    # warn if both patterns contain wildcards
                    if has_wildcard(matched):
                        self.warn('page %s matches two patterns in '
                                  'html_sidebars: %r and %r' %
                                  (pagename, matched, pattern))
                    # else the already matched pattern is more specific
                    # than the present one, because it contains no wildcard
                    continue
            matched = pattern
            sidebars = patsidebars
    if sidebars is None:
        # keep defaults
        pass
    elif isinstance(sidebars, basestring):
        # 0.x compatible mode: insert custom sidebar before searchbox
        customsidebar = sidebars
        sidebars = None
    ctx['sidebars'] = sidebars
    ctx['customsidebar'] = customsidebar
# --------- these are overwritten by the serialization builder

def get_target_uri(self, docname, typ=None):
    """Return the URI for *docname*: the docname plus the link suffix."""
    return docname + self.link_suffix
def handle_page(self, pagename, addctx, templatename='page.html',
outfilename=None, event_arg=None):
ctx = self.globalcontext.copy()
# current_page_name is backwards compatibility
ctx['pagename'] = ctx['current_page_name'] = pagename
default_baseuri = self.get_target_uri(pagename)
# in the singlehtml builder, default_baseuri still contains an #anchor
# part, which relative_uri doesn't really like...
default_baseuri = default_baseuri.rsplit('#', 1)[0]
def pathto(otheruri, resource=False, baseuri=default_baseuri):
if resource and '://' in otheruri:
# allow non-local resources given by scheme
return otheruri
elif not resource:
otheruri = self.get_target_uri(otheruri)
uri = relative_uri(baseuri, otheruri) or '#'
return uri
ctx['pathto'] = pathto
ctx['hasdoc'] = lambda name: name in self.env.all_docs
if self.name != 'htmlhelp':
ctx['encoding'] = encoding = self.config.html_output_encoding
else:
ctx['encoding'] = encoding = self.encoding
ctx['toctree'] = lambda **kw: self._get_local_toctree(pagename, **kw)
self.add_sidebars(pagename, ctx)
ctx.update(addctx)
self.app.emit('html-page-context', pagename, templatename,
ctx, event_arg)
try:
output = self.templates.render(templatename, ctx)
except UnicodeError:
self.warn("a Unicode error occurred when rendering the page %s. "
"Please make sure all config values that contain "
"non-ASCII content are Unicode strings." % pagename)
return
if not outfilename:
outfilename = self.get_outfilename(pagename)
# outfilename's path is in general different from self.outdir
ensuredir(path.dirname(outfilename))
try:
f = codecs.open(outfilename, 'w', encoding, 'xmlcharrefreplace')
try:
f.write(output)
finally:
f.close()
except (IOError, OSError), err:
self.warn("error writing file %s: %s" % (outfilename, err))
if self.copysource and ctx.get('sourcename'):
# copy the source file for the "show source" link
source_name = path.join(self.outdir, '_sources',
os_path(ctx['sourcename']))
ensuredir(path.dirname(source_name))
copyfile(self.env.doc2path(pagename), source_name)
def handle_finish(self):
    """Dump the search index and the object inventory at the end."""
    self.dump_search_index()
    self.dump_inventory()
def dump_inventory(self):
    """Write the zlib-compressed object inventory (``objects.inv``) used
    by intersphinx.
    """
    self.info(bold('dumping object inventory... '), nonl=True)
    f = open(path.join(self.outdir, INVENTORY_FILENAME), 'wb')
    try:
        f.write((u'# Sphinx inventory version 2\n'
                 u'# Project: %s\n'
                 u'# Version: %s\n'
                 u'# The remainder of this file is compressed using zlib.\n'
                 % (self.config.project, self.config.version)
                 ).encode('utf-8'))
        compressor = zlib.compressobj(9)
        for domainname, domain in self.env.domains.iteritems():
            for name, dispname, type, docname, anchor, prio in \
                    domain.get_objects():
                if anchor.endswith(name):
                    # this can shorten the inventory by as much as 25%
                    anchor = anchor[:-len(name)] + '$'
                uri = self.get_target_uri(docname) + '#' + anchor
                if dispname == name:
                    dispname = u'-'
                f.write(compressor.compress(
                    (u'%s %s:%s %s %s %s\n' % (name, domainname, type,
                                               prio, uri, dispname)
                     ).encode('utf-8')))
        f.write(compressor.flush())
    finally:
        f.close()
    self.info('done')
def dump_search_index(self):
    """Dump the search index atomically (write to a temp file, then move
    it into place so a failed dump never clobbers the old index).
    """
    self.info(bold('dumping search index... '), nonl=True)
    self.indexer.prune(self.env.all_docs)
    searchindexfn = path.join(self.outdir, self.searchindex_filename)
    # first write to a temporary file, so that if dumping fails,
    # the existing index won't be overwritten
    if self.indexer_dumps_unicode:
        f = codecs.open(searchindexfn + '.tmp', 'w', encoding='utf-8')
    else:
        f = open(searchindexfn + '.tmp', 'wb')
    try:
        self.indexer.dump(f, self.indexer_format)
    finally:
        f.close()
    movefile(searchindexfn + '.tmp', searchindexfn)
    self.info('done')
class DirectoryHTMLBuilder(StandaloneHTMLBuilder):
    """
    A StandaloneHTMLBuilder that creates all HTML pages as "index.html" in
    a directory given by their pagename, so that generated URLs don't have
    ``.html`` in them.
    """
    name = 'dirhtml'

    def get_target_uri(self, docname, typ=None):
        """Directory-style URI: 'index' maps to '', others get a trailing
        separator instead of a file suffix."""
        if docname == 'index':
            return ''
        if docname.endswith(SEP + 'index'):
            return docname[:-5]  # up to sep
        return docname + SEP

    def get_outfilename(self, pagename):
        """Write every page as ``<pagename>/index<suffix>`` except the
        index pages themselves."""
        if pagename == 'index' or pagename.endswith(SEP + 'index'):
            outfilename = path.join(self.outdir, os_path(pagename)
                                    + self.out_suffix)
        else:
            outfilename = path.join(self.outdir, os_path(pagename),
                                    'index' + self.out_suffix)
        return outfilename

    def prepare_writing(self, docnames):
        StandaloneHTMLBuilder.prepare_writing(self, docnames)
        self.globalcontext['no_search_suffix'] = True
class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
    """
    A StandaloneHTMLBuilder subclass that puts the whole document tree on one
    HTML page.
    """
    name = 'singlehtml'
    copysource = False

    def get_outdated_docs(self):
        # a single page is always rebuilt completely
        return 'all documents'

    def get_target_uri(self, docname, typ=None):
        if docname in self.env.all_docs:
            # all references are on the same page...
            return self.config.master_doc + self.out_suffix + \
                '#document-' + docname
        else:
            # chances are this is a html_additional_page
            return docname + self.out_suffix

    def get_relative_uri(self, from_, to, typ=None):
        # ignore source
        return self.get_target_uri(to, typ)

    def fix_refuris(self, tree):
        """Rewrite refuris that carry a double anchor (page#doc#target)
        so they point at the single output file with only the final
        anchor."""
        fname = self.config.master_doc + self.out_suffix
        for refnode in tree.traverse(nodes.reference):
            if 'refuri' not in refnode:
                continue
            refuri = refnode['refuri']
            hashindex = refuri.find('#')
            if hashindex < 0:
                continue
            hashindex = refuri.find('#', hashindex + 1)
            if hashindex >= 0:
                refnode['refuri'] = fname + refuri[hashindex:]

    def assemble_doctree(self):
        """Inline every toctree into a single doctree rooted at the
        master document, resolve references, and fix anchors."""
        master = self.config.master_doc
        tree = self.env.get_doctree(master)
        tree = inline_all_toctrees(self, set(), master, tree, darkgreen)
        tree['docname'] = master
        self.env.resolve_references(tree, master, self)
        self.fix_refuris(tree)
        return tree

    def get_doc_context(self, docname, body, metatags):
        # no relation links...
        toc = self.env.get_toctree_for(self.config.master_doc, self, False)
        # if there is no toctree, toc is None
        if toc:
            self.fix_refuris(toc)
            toc = self.render_partial(toc)['fragment']
            display_toc = True
        else:
            toc = ''
            display_toc = False
        return dict(
            parents=[],
            prev=None,
            next=None,
            docstitle=None,
            title=self.config.html_title,
            meta=None,
            body=body,
            metatags=metatags,
            rellinks=[],
            sourcename='',
            toc=toc,
            display_toc=display_toc,
        )

    def write(self, *ignored):
        docnames = self.env.all_docs

        self.info(bold('preparing documents... '), nonl=True)
        self.prepare_writing(docnames)
        self.info('done')

        self.info(bold('assembling single document... '), nonl=True)
        doctree = self.assemble_doctree()
        self.info()
        self.info(bold('writing... '), nonl=True)
        self.write_doc_serialized(self.config.master_doc, doctree)
        self.write_doc(self.config.master_doc, doctree)
        self.info('done')

    def finish(self):
        # no indices or search pages are supported
        self.info(bold('writing additional files...'), nonl=1)

        # additional pages from conf.py
        for pagename, template in self.config.html_additional_pages.items():
            self.info(' '+pagename, nonl=1)
            self.handle_page(pagename, {}, template)

        if self.config.html_use_opensearch:
            self.info(' opensearch', nonl=1)
            fn = path.join(self.outdir, '_static', 'opensearch.xml')
            self.handle_page('opensearch', {}, 'opensearch.xml',
                             outfilename=fn)

        self.info()

        self.copy_image_files()
        self.copy_download_files()
        self.copy_static_files()
        self.copy_extra_files()
        self.write_buildinfo()
        self.dump_inventory()
class SerializingHTMLBuilder(StandaloneHTMLBuilder):
    """
    An abstract builder that serializes the generated HTML.
    """
    #: the serializing implementation to use.  Set this to a module that
    #: implements a `dump`, `load`, `dumps` and `loads` functions
    #: (pickle, simplejson etc.)
    implementation = None
    implementation_dumps_unicode = False
    #: additional arguments for dump()
    additional_dump_args = ()

    #: the filename for the global context file
    globalcontext_filename = None

    supported_image_types = ['image/svg+xml', 'image/png',
                             'image/gif', 'image/jpeg']

    def init(self):
        self.config_hash = ''
        self.tags_hash = ''
        self.theme = None       # no theme necessary
        self.templates = None   # no template bridge necessary
        self.init_translator_class()
        self.init_highlighter()

    def get_target_uri(self, docname, typ=None):
        # directory-style URIs, like the dirhtml builder
        if docname == 'index':
            return ''
        if docname.endswith(SEP + 'index'):
            return docname[:-5]  # up to sep
        return docname + SEP

    def dump_context(self, context, filename):
        """Serialize *context* to *filename* with the configured
        implementation module."""
        if self.implementation_dumps_unicode:
            f = codecs.open(filename, 'w', encoding='utf-8')
        else:
            f = open(filename, 'wb')
        try:
            self.implementation.dump(context, f, *self.additional_dump_args)
        finally:
            f.close()

    def handle_page(self, pagename, ctx, templatename='page.html',
                    outfilename=None, event_arg=None):
        """Serialize the page context instead of rendering a template."""
        ctx['current_page_name'] = pagename
        self.add_sidebars(pagename, ctx)

        if not outfilename:
            outfilename = path.join(self.outdir,
                                    os_path(pagename) + self.out_suffix)

        self.app.emit('html-page-context', pagename, templatename,
                      ctx, event_arg)

        ensuredir(path.dirname(outfilename))
        self.dump_context(ctx, outfilename)

        # if there is a source file, copy the source file for the
        # "show source" link
        if ctx.get('sourcename'):
            source_name = path.join(self.outdir, '_sources',
                                    os_path(ctx['sourcename']))
            ensuredir(path.dirname(source_name))
            copyfile(self.env.doc2path(pagename), source_name)

    def handle_finish(self):
        # dump the global context
        outfilename = path.join(self.outdir, self.globalcontext_filename)
        self.dump_context(self.globalcontext, outfilename)

        # super here to dump the search index
        StandaloneHTMLBuilder.handle_finish(self)

        # copy the environment file from the doctree dir to the output dir
        # as needed by the web app
        copyfile(path.join(self.doctreedir, ENV_PICKLE_FILENAME),
                 path.join(self.outdir, ENV_PICKLE_FILENAME))

        # touch 'last build' file, used by the web application to determine
        # when to reload its environment and clear the cache
        open(path.join(self.outdir, LAST_BUILD_FILENAME), 'w').close()
class PickleHTMLBuilder(SerializingHTMLBuilder):
    """
    A Builder that dumps the generated HTML into pickle files.
    """
    implementation = pickle
    implementation_dumps_unicode = False
    additional_dump_args = (pickle.HIGHEST_PROTOCOL,)
    indexer_format = pickle
    indexer_dumps_unicode = False
    name = 'pickle'
    out_suffix = '.fpickle'
    globalcontext_filename = 'globalcontext.pickle'
    searchindex_filename = 'searchindex.pickle'


# compatibility alias
WebHTMLBuilder = PickleHTMLBuilder
class JSONHTMLBuilder(SerializingHTMLBuilder):
    """
    A builder that dumps the generated HTML into JSON files.
    """
    implementation = jsonimpl
    implementation_dumps_unicode = True
    indexer_format = jsonimpl
    indexer_dumps_unicode = True
    name = 'json'
    out_suffix = '.fjson'
    globalcontext_filename = 'globalcontext.json'
    searchindex_filename = 'searchindex.json'

    def init(self):
        # fail early and clearly when no JSON implementation is available
        if jsonimpl.json is None:
            raise SphinxError(
                'The module simplejson (or json in Python >= 2.6) '
                'is not available. The JSONHTMLBuilder builder will not work.')
        SerializingHTMLBuilder.init(self)
| |
#! /usr/bin/env python
"""Collection of classes for representing finite element meshes."""
import math
import numpy as np
from collections import namedtuple
import ap.mesh.meshtools as meshtools
import ap.mesh.parsers as parsers
def _element_order(num_basis_functions):
"""
Calculate the order of an element based on the number of basis
functions.
This formula is based on solving k = n*(n + 1)/2 (for a triangular
number k) and taking the positive root from the quadratic formula.
"""
return int(round((math.sqrt(1 + 8*num_basis_functions) - 1)/2.0 - 1.0))
def mesh_factory(*args, **kwargs):
    """
    Parse a finite element mesh representation and then convert it to a
    Mesh or ArgyrisMesh object.

    Required Arguments
    ------------------
    * mesh_files : text files comprising the finite element mesh.

    Keyword Arguments
    -----------------
    * argyris : boolean to specify if the mesh should have
          additional nodes added to transform it in to an
          Argyris mesh. Defaults to False.  The capitalized
          spelling ``Argyris`` is accepted as well.
    * order : Mesh element order. The nodes will be renumbered
          appropriately (1 for linears, 2 for quadratics).
          Defaults to None. This is not implemented yet.
    * projection : function that projects nodes. Defaults to None
          (no projection)
    * borders : a dictionary correlating names with GMSH 'Physical
          Line' attributes. For example,

              borders = {'open' : (1, 2)}

          will correlate edges on Physical Lines 1 and 2
          with the 'open' edge collection.
    * default_border : the default edge collection for any edges that
          are not in a special_border collection. Defaults
          to 'land'.
    """
    parsed_mesh = parsers.parser_factory(*args)
    # BUGFIX: previously the mere *presence* of the 'argyris'/'Argyris'
    # key selected an ArgyrisMesh, even for argyris=False, contradicting
    # the documented default; both spellings are now handled in one place
    # and their boolean value is respected.
    keywords = kwargs.copy()
    want_argyris = bool(keywords.pop('argyris', False)) or \
        bool(keywords.pop('Argyris', False))
    if want_argyris:
        return ArgyrisMesh(parsed_mesh, **keywords)
    return Mesh(parsed_mesh, **keywords)
class Mesh(object):
"""
Representation of a finite element mesh. If every node shares the
same final coordinate value (e.g. all z-values are the same) then
this dimension is dropped.
Required Arguments
------------------
* parsed_mesh : Something that has the same interface as a MeshParser
(has fields elements, nodes, and edge_collections)
Optional Arguments
------------------
* borders : A dictionary correlating names with a tuple of
GMSH physical line numbers. for example:
borders = {'no_flow' : (1, 2, 3), 'coast' : (4, 5, 6)}
* default_border : the name corresponding to the default edge
collection. Defaults to "land".
* ignore_given_edges : If True, then throw out the provided edges and
extract them automatically from the element
connectivity matrix. Defaults to False. Useful when
the edges supplied by the parsed mesh could be
erroneous (contain non-mesh information).
* projection : function for transforming nodes (say from 3D to
2D); for example,
projection = lambda x : x[0:2]
will project the nodes down to the XY plane. Defaults to the
identity function.
Properties
----------
* elements : element connectivity matrix.
* nodes : coordinates of nodes.
* edge_collections : a dictionary relating the border names to the
edge tuples that fall along that border. If possible, the last
number in the tuple is the geometrical item number that the edge
falls upon from GMSH. Otherwise it is -1. For example,
print(t.edge_collections)
=> {'land': set([(3, 4, 7, 3), (4, 1, 8, 4), (2, 3, 6, 2),
(1, 2, 5, 1)])}
* boundary_nodes : Set containing the node numbers of nodes on the
boundary.
* interior_nodes : Set containing the node numbers of nodes in the
interior.
* order : Order of the interpolating polynomials.
Methods
-------
* get_nnz() : Calculate the number of nonzero entries in a typical
finite element matrix (e.g. stiffness matrix) based on
the total number of inner products. This will be
exactly the value of the length of one of the
tripplet-form vectors.
* savetxt(prefix="") : Save the mesh as text files
1. prefix + nodes.txt
2. prefix + elements.txt
3. prefix + interior_nodes.txt
4. prefix + boundary_nodes.txt
and, additionally, for each edge collection save
prefix + name + _edges.txt.
"""
def __init__(self, parsed_mesh, borders=None, default_border="land",
ignore_given_edges=False, projection=None):
if borders is None:
borders = {}
self.elements = parsed_mesh.elements
self.nodes = meshtools.project_nodes(projection, parsed_mesh.elements,
parsed_mesh.nodes,
attempt_flatten=True)
self.edge_collections = \
meshtools.organize_edges(parsed_mesh.edges, borders=borders,
default_border=default_border)
if max(map(len, self.edge_collections.values())) == 0 \
or ignore_given_edges:
self.edge_collections = {default_border:
set(meshtools.extract_boundary_edges(self.elements))}
if len(np.unique(self.elements)) != self.nodes.shape[0]:
self._fix_unused_nodes()
self.boundary_nodes = {}
interior_nodes = set(range(1, len(self.nodes)+1))
for name, edge_collection in self.edge_collections.items():
self.boundary_nodes[name] = \
np.fromiter(set(node for edge in edge_collection
for node in edge[0:-1]), int)
interior_nodes -= set(self.boundary_nodes[name])
self.interior_nodes = np.fromiter(interior_nodes, int)
self.order = _element_order(self.elements.shape[1])
self.mean_stepsize = self._get_stepsize()
def get_nnz(self):
"""
Estimate the number of nonzero entries present in some IJV-format
sparse matrix constructed from inner products on this collection of
elements.
"""
return self.elements.shape[1]**2 * self.elements.shape[0]
def savetxt(self, prefix=""):
"""
Save the mesh as a series of text files:
* prefix_nodes.txt : nodal coordinates of the mesh.
* prefix_elements.txt : element connectivity matrix of the mesh.
* prefix_interior_nodes.txt : list of interior nodes of the mesh.
* prefix_NAME_boundary_nodes.txt : list of boundary nodes of the mesh
corresponding to the border with name NAME.
* prefix_NAME_edges.txt : edges in boundary collection NAME, saved in
the traditional GMSH order.
Optional Arguments
------------------
prefix : a string prepended to each of the file names. If nonempty,
prepend exactly 'prefix + "_".'
"""
if prefix:
prefix += "_"
np.savetxt(prefix + "nodes.txt", self.nodes)
np.savetxt(prefix + "elements.txt", self.elements, fmt="%d")
np.savetxt(prefix + "interior_nodes.txt", self.interior_nodes,
fmt="%d")
for (name, collection) in self.boundary_nodes.items():
np.savetxt(prefix + name + "_boundary_nodes.txt", collection,
fmt="%d")
for name, collection in self.edge_collections.items():
np.savetxt(prefix + name + '_edges.txt', [t for t in collection],
fmt='%d')
def _fix_unused_nodes(self):
    """
    GMSH has a bug where it saves non-mesh nodes (that is, nodes that
    are not members of any element) to some files. Get around that
    issue by deleting the extra nodes and renumbering accordingly.

    Rewrites self.nodes, self.elements and self.edge_collections in
    place so that the surviving nodes carry a dense 1-based numbering.
    """
    # Map the old (sparse, 1-based) node numbers to a dense 1..N
    # numbering; np.unique returns the used numbers in sorted order.
    number_of_mesh_nodes = len(np.unique(self.elements))
    old_to_new = dict(zip(np.unique(self.elements),
                          range(1, number_of_mesh_nodes + 1)))
    new_to_old = {new_node: old_node
                  for (old_node, new_node) in old_to_new.items()}
    new_elements = np.array([[old_to_new[node] for node in element]
                             for element in self.elements])
    # Node numbers are 1-based; subtract 1 to index the coordinate rows.
    new_nodes = np.array([self.nodes[new_to_old[new_node_number] - 1]
                          for new_node_number in new_to_old.keys()])
    # Nodes per edge implied by the element size (e.g. 6-node quadratic
    # triangles have 3-node edges).
    try:
        edge_size = {3: 2, 6: 3, 10: 4, 15: 5}[self.elements.shape[1]]
    except KeyError:
        raise ValueError("Unsupported mesh type")
    new_edge_collections = dict()
    for key, collection in self.edge_collections.items():
        new_edge_collections[key] = set()
        for edge in collection:
            # geometrical information available: the trailing entry is a
            # tag, not a node, so it must not be renumbered
            if len(edge) == edge_size + 1:
                new_edge = tuple([old_to_new[node]
                                  for node in edge[0:-1]] + [edge[-1]])
                new_edge_collections[key].add(new_edge)
            # geometrical information not available
            elif len(edge) == edge_size:
                new_edge = tuple([old_to_new[node] for node in edge])
                new_edge_collections[key].add(new_edge)
            else:
                raise ValueError("Mismatch between size of mesh and" +
                                 " size of edges")
    self.edge_collections = new_edge_collections
    self.elements = new_elements
    self.nodes = new_nodes
def _get_stepsize(self):
"""
Calculate the average stepsize of the mesh (corner to corner).
"""
return np.average(
np.sqrt((self.nodes[self.elements[:, 0] - 1, 0]
- self.nodes[self.elements[:, 1] - 1, 0])**2 +
(self.nodes[self.elements[:, 0] - 1, 1]
- self.nodes[self.elements[:, 1] - 1, 1])**2))
# Record tying a boundary edge to the element that owns it and the local
# edge type (per edge_type_to_nodes below: 1 = corners 0-1, 2 = corners 0-2,
# 3 = corners 1-2); 'edge' is the (corner, corner, midpoint) node tuple.
ArgyrisEdge = namedtuple('ArgyrisEdge',
                         ['element_number', 'edge_type', 'edge'])
class ArgyrisMesh(object):
    """
    Class to build an Argyris mesh from a parsed mesh. Can handle a mesh
    with multiple boundary conditions.

    The algorithm is as follows:

    1. Treat the current midpoint nodes as the normal derivative basis
       functions.
    2. Extract the corner nodes as a separate array. Associate each corner
       node with five new nodes stacked at the same location.
    3. Update nodal coordinates and fix the element order.

    Required Arguments
    ------------------
    * parsed_mesh : a parsed mesh (inherits from the MeshParser class)

    Properties
    ----------
    * elements : a numpy array listing the node numbers of
                 every element.
    * edges_by_midpoint : a dictionary associating each element with a
                          certain edge (indexed by the normal derivative
                          basis function number)
    * node_collections : a list of ArgyrisNodeCollection objects.
    * nodes : a numpy array of node coordinates.

    Methods
    -------
    * savetxt : save the mesh in multiple text files.
    """
    def __init__(self, parsed_mesh, borders=None, default_border="land",
                 ignore_given_edges=False, projection=lambda x: x):
        if borders is None:
            borders = dict()
        if parsed_mesh.elements.shape[1] != 6:
            raise NotImplementedError("Support for changing mesh order is not "
                                      + "implemented.")
        lagrange_mesh = Mesh(parsed_mesh, borders=borders,
                             default_border=default_border,
                             projection=projection,
                             ignore_given_edges=ignore_given_edges)
        if lagrange_mesh.nodes.shape[1] != 2:
            raise ValueError("Requires a 2D mesh; try a different projection.")
        # FIX: np.int was removed in NumPy 1.24 (it was always just an alias
        # for the builtin int), so use int directly.
        self.elements = np.zeros((lagrange_mesh.elements.shape[0], 21),
                                 dtype=int)
        self.elements[:, 0:6] = lagrange_mesh.elements
        # solve a lot of orientation problems later by ensuring that the corner
        # nodes are in sorted order.
        for element in self.elements:
            self._sort_corners_increasing(element[0:6])
        # stack the five extra basis function nodes on each corner node; new
        # numbers start right after the largest Lagrange node number.
        first_new_node = lagrange_mesh.elements.max() + 1
        self.stacked_nodes = \
            {node_number: np.arange(first_new_node + 5*count,
                                    first_new_node + 5*count + 5)
             for count, node_number in
             enumerate(np.unique(self.elements[:, 0:3]))}
        for element in self.elements:
            element[6:11] = self.stacked_nodes[element[0]]
            element[11:16] = self.stacked_nodes[element[1]]
            element[16:21] = self.stacked_nodes[element[2]]
        self._fix_argyris_node_order()
        # update the edges by elements. Columns 18, 19, 20 hold the normal
        # derivative (midpoint) numbers after _fix_argyris_node_order.
        self.edges_by_midpoint = dict()
        edge_type_to_nodes = {1: (0, 1, 18), 2: (0, 2, 19), 3: (1, 2, 20)}
        for element_number, element in enumerate(self.elements):
            for edge_type in range(1, 4):
                (i, j, k) = edge_type_to_nodes[edge_type]
                edge = ArgyrisEdge(element_number=element_number + 1,
                                   edge_type=edge_type,
                                   edge=(element[i], element[j], element[k]))
                # Two elements may share a midpoint, but then they must agree
                # on the edge's node tuple; otherwise the mesh is corrupt.
                if element[17 + edge_type] in self.edges_by_midpoint:
                    if (self.edges_by_midpoint[element[17 + edge_type]].edge
                            != edge.edge):
                        raise ValueError("Mesh is not consistent")
                else:
                    self.edges_by_midpoint[element[17 + edge_type]] = edge
        # set coordinates for the new nodes: stacked nodes sit exactly on
        # top of the corner they were stacked on.
        self.nodes = np.zeros((self.elements.max(), 2))
        self.nodes.fill(np.nan)
        self.nodes[0:lagrange_mesh.nodes.shape[0], :] = lagrange_mesh.nodes
        for stacked_node, new_nodes in self.stacked_nodes.items():
            self.nodes[new_nodes - 1] = self.nodes[stacked_node - 1]
        # Construct the edge collections.
        self.node_collections = []
        self._build_node_collections(lagrange_mesh)

    def savetxt(self, prefix=""):
        """
        Save the following text files:
            nodes.txt : nodal coordinates
            elements.txt : element connectivity matrix
        and for each collection of nodes with key NAME:
            NAME_edges.txt : all edge tuples (end, end, midpoint)
            NAME_all.txt : all numbers of nodes in the collection.

        Optional Arguments
        ------------------
        prefix : a string prepended to each of the file names. If nonempty,
        prepend exactly 'prefix + "_".'
        """
        if prefix:
            file_prefix = prefix + "_"
        else:
            file_prefix = prefix
        np.savetxt(file_prefix + 'nodes.txt', self.nodes)
        np.savetxt(file_prefix + 'elements.txt', self.elements, fmt="%d")
        for collection in self.node_collections:
            # BUG FIX: pass the original (underscore-free) prefix; each
            # collection appends its own "_". The old code stripped one
            # character from the prefix on EVERY loop iteration, corrupting
            # file names when there were several collections.
            collection.savetxt(prefix)

    def _sort_corners_increasing(self, element):
        """
        Ensure that the corners of the input quadratic element are in
        increasing order. For example, convert

            1 3 2 4 6 5

        to

            1 2 3 5 6 4

        Works in place on a length-6 view; midpoints (entries 3-5) are
        swapped in lockstep with their opposite corners.
        """
        if element[0] > element[1]:
            element[0], element[1] = element[1], element[0]
            element[4], element[5] = element[5], element[4]
        if element[1] > element[2]:
            element[2], element[1] = element[1], element[2]
            element[3], element[5] = element[5], element[3]
        if element[0] > element[2]:
            element[2], element[0] = element[0], element[2]
            element[3], element[4] = element[4], element[3]
        if element[0] > element[1]:
            element[0], element[1] = element[1], element[0]
            element[4], element[5] = element[5], element[4]

    def _build_node_collections(self, lagrange_mesh):
        """
        Handle the edges by building a list of ArgyrisNodeCollection
        objects. This is done by extracting the information regarding
        corner nodes and midpoints from the lagrange edge data and saving
        the interior nodes as everything that was not a boundary node.
        """
        interior_function_values = set(lagrange_mesh.elements[:, 0:3].flat)
        interior_normal_derivatives = set(lagrange_mesh.elements[:, 3:6].flat)
        for border_name, collection in lagrange_mesh.edge_collections.items():
            # save left points of edges.
            function_values = {x[0] for x in collection}
            normal_derivatives = {x[2] for x in collection}
            self.node_collections.append(
                ArgyrisNodeCollection(function_values, normal_derivatives,
                                      collection, self, name=border_name))
            # whatever is not on some border is interior.
            interior_function_values.difference_update(function_values)
            interior_normal_derivatives.difference_update(normal_derivatives)
        self.node_collections.append(ArgyrisNodeCollection(
            interior_function_values, interior_normal_derivatives, [], self,
            name='interior'))

    def _fix_argyris_node_order(self):
        """
        Fix the node orderings from the constructed format

            [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21]

        to the usual Argyris format of

            [1 2 3 7 8 12 13 17 18 9 10 11 14 15 16 19 20 21 4 6 5].
        """
        normal_derivatives1 = self.elements[:, 3].copy()
        normal_derivatives2 = self.elements[:, 5].copy()
        normal_derivatives3 = self.elements[:, 4].copy()
        first_nodes = self.elements[:, 6:11].copy()
        second_nodes = self.elements[:, 11:16].copy()
        third_nodes = self.elements[:, 16:21].copy()
        self.elements[:, 18] = normal_derivatives1
        self.elements[:, 19] = normal_derivatives2
        self.elements[:, 20] = normal_derivatives3
        self.elements[:, 3:5] = first_nodes[:, 0:2]
        self.elements[:, 9:12] = first_nodes[:, 2:5]
        self.elements[:, 5:7] = second_nodes[:, 0:2]
        self.elements[:, 12:15] = second_nodes[:, 2:5]
        self.elements[:, 7:9] = third_nodes[:, 0:2]
        self.elements[:, 15:18] = third_nodes[:, 2:5]
class ArgyrisNodeCollection(object):
    """
    Contains information about a group of nodes in an Argyris Mesh and any
    relevant edge data.

    Required Arguments
    ------------------
    * function_values : set of basis function numbers that
                        approximate function values on the Argyris
                        mesh.
    * normal_derivatives : set of the node numbers corresponding to
                           normal derivative basis functions.
    * edges : set of tuples corresponding to
              (endpoint, endpoint, midpoint)
    * mesh : the relevant Argyris mesh.

    Optional Arguments
    ------------------
    * name : prefix on the output files. Defaults to 'inner'.
    """
    def __init__(self, function_values, normal_derivatives,
                 edges, mesh, name='inner'):
        self.function_values = function_values
        self.normal_derivatives = normal_derivatives
        self.name = name
        # Keep only the stacked-node groups belonging to this collection.
        self.stacked_nodes = {}
        for node in self.function_values:
            self.stacked_nodes[node] = mesh.stacked_nodes[node]
        # Resolve each raw edge tuple to its ArgyrisEdge via the midpoint.
        self.edges = []
        for edge in edges:
            self.edges.append(mesh.edges_by_midpoint[edge[-2]])

    def savetxt(self, prefix=""):
        """
        Save the data to text files; place all node numbers in the collection
        in one file and all information on edges in another.
        """
        if prefix:
            prefix += "_"
        if self.edges:  # don't save if there are no edges.
            rows = []
            for argyris_edge in self.edges:
                rows.append([argyris_edge.element_number,
                             argyris_edge.edge_type,
                             argyris_edge.edge[0],
                             argyris_edge.edge[1],
                             argyris_edge.edge[2]])
            np.savetxt(prefix + self.name + "_edges.txt", np.array(rows),
                       "%d")
        # list() calls behave identically on python2.7 and python3, where
        # *.values() and *.keys() became views/iterators.
        all_numbers = (list(self.stacked_nodes.values()) +
                       list(self.stacked_nodes.keys()) +
                       list(self.normal_derivatives))
        np.savetxt(prefix + self.name + "_all.txt",
                   np.unique(np.hstack(all_numbers)), "%d")

    def __str__(self):
        """For interactive debugging use."""
        parts = ["Node collection name: " + self.name,
                 "function values:",
                 str(self.function_values),
                 "normal derivatives:",
                 str(self.normal_derivatives)]
        return "\n".join(parts) + "\n"
| |
"""
A live connection to a REDCap database.
This serves largely to wrap associated functionality around a PyCap connection,
mostly just for neatness.
"""
### IMPORTS
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import object
import csv
# for import_records_chunked()
import time
# Py2 vs py3: Python 2's StringIO module became io.StringIO in Python 3.
# Catch ImportError specifically -- a bare "except:" would also swallow
# unrelated failures (including KeyboardInterrupt) raised during import.
try:
    from StringIO import StringIO
except ImportError:
    from io import StringIO
import redcap
from . import consts
from . import utils
from . import csvutils
__all__ = [
'Connection'
]
### CONSTANT & DEFINES
### CODE ###
class Connection (redcap.Project):
    """
    A live connection to a REDCap database.

    Mostly just a wrapping of the PyCap interface, for neatness.
    """

    def __init__ (self, url, token):
        """
        Initialise connection to REDCap project.

        Args:
            url (str): URL of the database
            token (str): the long alphanumeric access string. Note that this
                is specific to a database and a user (and that user's
                permissions).

        For example::

            >>> import os
            >>> url = os.environ['REDCAPHELPER_TEST_URL']
            >>> token = os.environ['REDCAPHELPER_TEST_TOKEN']
            >>> conn = Connection (url, token)

        """
        ## Preconditions:
        assert url.startswith ('http'), \
            "redcap api url '%s' is deformed " % url
        assert url.endswith ('/'), \
            "redcap api url '%s' does not end in forward-slash" % url
        ## Main:
        super (Connection, self).__init__ (url, token)

    def import_records_chunked (self, recs,
            chunk_sz=consts.DEF_UPLOAD_CHUNK_SZ,
            sleep=consts.DEF_SLEEP, overwrite=True):
        """
        Import records into the attached database.

        Args:
            recs (sequence of dicts): data to be uploaded
            chunk_sz (int): number of records to be uploaded in each batch
            sleep (int): seconds to wait between each upload
            overwrite (bool): should values missing in the import be
                overwritten

        Importing is a memory- (and CPU-) hungry process for REDCap. Thus it
        is necessary to manually throttle the process by uploading data in
        chunks and pausing between chunks.

        """
        # TODO(paul): exactly what does the overwrite arg do?
        # TODO(paul): print better response & handle responses better
        # TODO(paul): have separate upload_chunk function for better handling?
        # TODO(paul): need date format option?
        ## Main:
        id_fld = self.def_field
        total_len = len (recs)
        for start, stop in utils.chunked_enumerate (recs, chunk_sz):
            msg = "Uploading records %s-%s of %s ('%s' to '%s')" % (
                start, stop-1, total_len, recs[start][id_fld],
                recs[stop-1][id_fld]
            )
            utils.msg_progress (msg)
            response = self.import_records (
                recs[start:stop],
                overwrite='overwrite' if overwrite else 'normal'
            )
            # XXX(paul): more informative messages
            utils.msg_progress (response)
            # throttle: pause between chunks, but not after the last one
            if sleep and (stop != total_len):
                time.sleep (sleep)

    def export_records_chunked (self,
            records=None, fields=None, forms=None, events=None,
            raw_or_label='raw', event_name='label', format='json',
            export_survey_fields=False, export_data_access_groups=False,
            df_kwargs=None, chunk_sz=consts.DEF_DOWNLOAD_CHUNK_SZ,
        ):
        """
        Download data in chunks to avoid memory errors.

        Args:
            chunk_sz (int): number of records to be downloaded in each batch
            format ('json', 'csv', 'dicts', 'xml', 'df'): the format of data
                to be returned

        Returns:
            a series of records in one format or another

        Only arguments peculiar to this function are detailed. See
        `export_records` for all others.

        Exporting is also a memory-hungry process for REDCap. Thus we make it
        easy on the server by batching up the downloaded records and combining
        them ourselves.

        """
        # TODO(paul): combine chunking functions?
        # XXX(paul): dataframe export format should be easy and useful?
        ## Preconditions & preparation:
        assert format in ('json', 'csv', 'dicts', 'xml', 'df'), \
            "unrecognised export format '%s'" % format
        ## Main:
        # if downloading all records get list
        if not records:
            id_fld = self.def_field
            record_list = self.export_records (fields=[id_fld])
            records = [r[id_fld] for r in record_list]
        # work out download format: 'dicts' is fetched as csv text and
        # converted after the download. (FIX: this previously read
        # ``format in ('dicts')`` which is substring matching on a string,
        # not tuple membership.)
        dl_format = 'csv' if format in ('dicts',) else format
        # now do the actual download
        try:
            responses = []
            total_len = len (records)
            for start, stop in utils.chunked_enumerate (records, chunk_sz):
                msg = "Downloading records %s-%s of %s ('%s' to '%s')" % (
                    start+1, stop, total_len, records[start], records[stop-1]
                )
                utils.msg_progress (msg)
                chunked_response = self.export_records (
                    records=records[start:stop],
                    fields=fields,
                    forms=forms,
                    events=events,
                    raw_or_label=raw_or_label,
                    event_name=event_name,
                    format=dl_format,
                    export_survey_fields=export_survey_fields,
                    export_data_access_groups=export_data_access_groups,
                    df_kwargs=df_kwargs,
                )
                # TODO: need to handle all possible ways of extending data
                responses.append (chunked_response)
        except redcap.RedcapError:
            # XXX(paul): shouldn't we just raise the initial error
            msg = "Chunked export failed for chunk_size %s" % chunk_sz
            raise ValueError (msg)
        else:
            # combine all the responses: json, csv, dicts, xml, df.
            # FIX: the accumulator was misspelled 'combined_reponses' in most
            # places but spelled correctly in one, so the json path died with
            # a NameError; the name is now consistent throughout.
            combined_responses = None
            if dl_format == 'csv':
                combined_responses = ''.join (responses)
            if dl_format == 'json':
                combined_responses = []
                for x in responses:
                    combined_responses.extend (x)
            if dl_format == 'xml':
                pass
            if dl_format == 'df':
                pass
            # FIX: for 'dicts', actually return the converted records; the
            # conversion used to be assigned to a dead local and the raw csv
            # text was returned instead.
            if format == 'dicts':
                combined_responses = csvutils.csv_str_to_dicts (
                    combined_responses)
            return combined_responses

    def export_schema (self):
        """
        Download the project schema (fields).

        Returns:
            the schema as a series of dicts, one per field, in project order.

        The project object actually contains the project schema as metadata
        but in a slightly awkward format. This returns the schema as a series
        of dicts giving the fields and their various options like validation.
        These will be in the order they appear in the project.

        """
        csv_txt = self.export_metadata (format='csv')
        return csvutils.csv_str_to_dicts (csv_txt)
# def export_field_names (self):
# """
# Download the project fields.
#
# These will be in the order they appear in the project. Remember that in
# REDCap, IDs are "names" and titles are "labels".
#
# """
# return [r['Variable / Field Name'] for r in self.export_schema()]
### END ###
| |
from pythonforandroid.build import Context
from pythonforandroid.graph import (
fix_deplist, get_dependency_tuple_list_for_recipe,
get_recipe_order_and_bootstrap, obvious_conflict_checker,
)
from pythonforandroid.bootstrap import Bootstrap
from pythonforandroid.recipe import Recipe
from pythonforandroid.util import BuildInterruptingException
from itertools import product
import mock
import pytest
# Shared build context used by every test in this module.
ctx = Context()
# Recipe-name sets that drive the parametrised tests below.
name_sets = [['python2'],
             ['kivy']]
bootstraps = [None,
              Bootstrap.get_bootstrap('sdl2', ctx)]
# Every (name_set, bootstrap) pair that must resolve without a conflict.
valid_combinations = list(product(name_sets, bootstraps))
valid_combinations.extend(
    [(['python3crystax'], Bootstrap.get_bootstrap('sdl2', ctx)),
     (['kivy', 'python3crystax'], Bootstrap.get_bootstrap('sdl2', ctx)),
     (['flask'], Bootstrap.get_bootstrap('webview', ctx)),
     (['pysdl2'], None),  # auto-detect bootstrap! important corner case
    ]
)
# Pairs that must be reported as a dependency conflict.
invalid_combinations = [
    [['python2', 'python3crystax'], None],
    [['pysdl2', 'genericndkbuild'], None],
]
invalid_combinations_simple = list(invalid_combinations)
# NOTE !! keep in mind when setting invalid_combinations_simple:
#
# This is used to test obvious_conflict_checker(), which only
# catches CERTAIN conflicts:
#
# This must be a list of conflicts where the conflict is ONLY in
# non-tuple/non-ambiguous dependencies, e.g.:
#
# dependencies_1st = ["python2", "pillow"]
# dependencies_2nd = ["python3crystax", "pillow"]
#
# This however won't work:
#
# dependencies_1st = [("python2", "python3"), "pillow"]
# dependencies_2nd = [("python2legacy", "python3crystax"), "pillow"]
#
# (This is simply because the conflict checker doesn't resolve this to
# keep the code simple enough)
def get_fake_recipe(name, depends=None, conflicts=None):
    """Build a mock that quacks like a p4a recipe, for graph tests."""
    fake = mock.Mock()
    fake.name = name
    fake.get_opt_depends_in_list = lambda: []
    fake.get_dir_name = lambda: name
    fake.depends = [] if depends is None else list(depends)
    fake.conflicts = [] if conflicts is None else list(conflicts)
    return fake
def register_fake_recipes_for_test(monkeypatch, recipe_list):
    """Patch Recipe.get_recipe so names in recipe_list resolve to the fakes.

    Names not in recipe_list still fall through to the real lookup.
    """
    real_get_recipe = Recipe.get_recipe

    def fake_get_recipe(name, ctx):
        matches = [r for r in recipe_list if r.name == name]
        if matches:
            return matches[0]
        return real_get_recipe(name, ctx)

    # staticmethod() keeps Python from binding the replacement as a method:
    monkeypatch.setattr(Recipe, 'get_recipe', staticmethod(fake_get_recipe))
@pytest.mark.parametrize('names,bootstrap', valid_combinations)
def test_valid_recipe_order_and_bootstrap(names, bootstrap):
    # Smoke test: a known-valid recipe/bootstrap pairing must resolve
    # without raising.
    get_recipe_order_and_bootstrap(ctx, names, bootstrap)
@pytest.mark.parametrize('names,bootstrap', invalid_combinations)
def test_invalid_recipe_order_and_bootstrap(names, bootstrap):
    # A known-bad pairing must abort the build and mention a conflict.
    with pytest.raises(BuildInterruptingException) as e_info:
        get_recipe_order_and_bootstrap(ctx, names, bootstrap)
    assert "conflict" in e_info.value.message.lower()
def test_blacklist():
    """Blacklisting removes exactly the named recipe from the build order."""
    # Baseline resolution without any blacklist:
    full_order, _modules, _bs = get_recipe_order_and_bootstrap(
        ctx, ["python3", "kivy"], None
    )
    # Same resolution, but with libffi blacklisted:
    reduced_order, _modules2, _bs2 = get_recipe_order_and_bootstrap(
        ctx, ["python3", "kivy"], None, blacklist=["libffi"]
    )
    assert "libffi" not in reduced_order
    assert set(reduced_order) | {"libffi"} == set(full_order)

    # webview + kivy combined must be reported as a conflict...
    wbootstrap = Bootstrap.get_bootstrap('webview', ctx)
    with pytest.raises(BuildInterruptingException) as excinfo:
        get_recipe_order_and_bootstrap(ctx, ["flask", "kivy"], wbootstrap)
    assert "conflict" in excinfo.value.message.lower()
    # ...but blacklisting sdl2 makes the conflict go away:
    get_recipe_order_and_bootstrap(
        ctx, ["flask", "kivy"], wbootstrap, blacklist=["sdl2"]
    )
def test_get_dependency_tuple_list_for_recipe(monkeypatch):
    """Blacklisted names are dropped; remaining deps become lowercase tuples."""
    recipe = get_fake_recipe("recipe1", depends=[
        "libffi",
        ("libffi", "Pillow"),
    ])
    deps = get_dependency_tuple_list_for_recipe(recipe, blacklist={"libffi"})
    assert deps == [("pillow",)]
@pytest.mark.parametrize('names,bootstrap', valid_combinations)
def test_valid_obvious_conflict_checker(names, bootstrap):
    # Note: obvious_conflict_checker is stricter on input
    # (needs fix_deplist) than get_recipe_order_and_bootstrap!
    obvious_conflict_checker(ctx, fix_deplist(names))
@pytest.mark.parametrize('names,bootstrap',
    invalid_combinations_simple  # see above for why this
)                                # is a separate list
def test_invalid_obvious_conflict_checker(names, bootstrap):
    # Note: obvious_conflict_checker is stricter on input
    # (needs fix_deplist) than get_recipe_order_and_bootstrap!
    with pytest.raises(BuildInterruptingException) as e_info:
        obvious_conflict_checker(ctx, fix_deplist(names))
    assert "conflict" in e_info.value.message.lower()
def test_misc_obvious_conflict_checker(monkeypatch):
    """Assorted corner cases of obvious_conflict_checker."""
    # Malformed input (deps not nested into tuples) must trip the assert:
    with pytest.raises(AssertionError):
        obvious_conflict_checker(
            ctx,
            ["this_is_invalid"]
            # (invalid because it isn't properly nested as tuple)
        )

    # Dependencies that aren't recipes at all are tolerated:
    obvious_conflict_checker(
        ctx, fix_deplist(["python3", "notarecipelibrary"])
    )

    # A conflict against a non-recipe dependency. No shipped recipe does
    # this currently, so fabricate one:
    with monkeypatch.context() as m:
        register_fake_recipes_for_test(m, [
            get_fake_recipe("recipe1", conflicts=[("fakelib")]),
        ])
        with pytest.raises(BuildInterruptingException) as excinfo:
            obvious_conflict_checker(ctx, fix_deplist(["recipe1", "fakelib"]))
        assert "conflict" in excinfo.value.message.lower()

    # A recipe that pulls in a conditional tuple of extra dependencies,
    # as ('python3', 'python2', ...) recipes once did; most recipes no
    # longer depend on this, so exercise it manually:
    with monkeypatch.context() as m:
        register_fake_recipes_for_test(m, [
            get_fake_recipe("recipe1", depends=[("libffi", "Pillow")]),
        ])
        obvious_conflict_checker(ctx, fix_deplist(["recipe1"]))
def test_indirectconflict_obvious_conflict_checker(monkeypatch):
    """An indirect conflict must blame the OUTER recipes that caused it."""
    with monkeypatch.context() as m:
        register_fake_recipes_for_test(m, [
            get_fake_recipe("outerrecipe1", depends=["innerrecipe1"]),
            get_fake_recipe("outerrecipe2", depends=["innerrecipe2"]),
            get_fake_recipe("innerrecipe1"),
            get_fake_recipe("innerrecipe2", conflicts=["innerrecipe1"]),
        ])
        with pytest.raises(BuildInterruptingException) as excinfo:
            obvious_conflict_checker(
                ctx,
                fix_deplist(["outerrecipe1", "outerrecipe2"])
            )
        message = excinfo.value.message.lower()
        assert ("conflict" in message and
                "outerrecipe1" in message and
                "outerrecipe2" in message)
def test_multichoice_obvious_conflict_checker(monkeypatch):
    """A conflict with a multi-choice (tuple) dependency is still caught."""
    with monkeypatch.context() as m:
        register_fake_recipes_for_test(m, [
            get_fake_recipe("recipe1", conflicts=["lib1", "lib2"]),
            get_fake_recipe("recipe2", depends=[("lib1", "lib2")]),
        ])
        with pytest.raises(BuildInterruptingException) as excinfo:
            obvious_conflict_checker(
                ctx,
                fix_deplist([("lib1", "lib2"), "recipe1"])
            )
        assert "conflict" in excinfo.value.message.lower()
def test_bootstrap_dependency_addition():
    # Resolving kivy alone must pull in a host python recipe automatically.
    build_order, python_modules, bs = get_recipe_order_and_bootstrap(
        ctx, ['kivy'], None)
    assert (('hostpython2' in build_order) or ('hostpython3' in build_order))
def test_graph_deplist_transformation():
    """fix_deplist lowercases names and wraps bare deps into 1-tuples."""
    assert fix_deplist(["Pillow", ('python2', 'python3')]) == \
        [('pillow',), ('python2', 'python3')]
    assert fix_deplist(["Pillow", ('python2',)]) == \
        [('pillow',), ('python2',)]
def test_bootstrap_dependency_addition2():
    # With python2 requested explicitly, hostpython2 specifically is added.
    build_order, python_modules, bs = get_recipe_order_and_bootstrap(
        ctx, ['kivy', 'python2'], None)
    assert 'hostpython2' in build_order
if __name__ == "__main__":
    # Allow running this module directly as a quick manual smoke test.
    get_recipe_order_and_bootstrap(ctx, ['python3'],
                                   Bootstrap.get_bootstrap('sdl2', ctx))
| |
"""
Miscellaneous actions for generally housekeeping the state of the GUI
"""
from .. import guiplugins, guiutils
from ordereddict import OrderedDict
from default.batch import BatchApplicationData, MailSender
import os, plugins, gtk
class Quit(guiplugins.BasicActionGUI):
    """Action that shuts the GUI down, asking for confirmation if needed."""
    def __init__(self, allApps, dynamic, inputOptions):
        guiplugins.BasicActionGUI.__init__(self, allApps, dynamic, inputOptions)
        # Only the dynamic GUI tracks a run name (used in the quit warning);
        # None marks the static GUI, where run-name updates are ignored.
        self.runName = inputOptions.get("name", "") if dynamic else None
    def _getStockId(self):
        return "quit"
    def _getTitle(self):
        return "_Quit"
    def isActiveOnCurrent(self, *args):
        # Quitting is always possible, whatever is selected.
        return True
    def getSignalsSent(self):
        return [ "Quit" ]
    def performOnCurrent(self):
        self.notify("Quit")
    def notifySetRunName(self, runName):
        if self.runName is not None:
            self.runName = runName
    def messageAfterPerform(self):
        pass # GUI isn't there to show it
    def getConfirmationMessage(self):
        # Warn about a user-named run and about child processes that will
        # be terminated; empty string means no confirmation is needed.
        message = ""
        if self.runName and not self.runName.startswith("Tests started from"):
            message = "You named this run as follows : \n" + self.runName + "\n"
        runningProcesses = guiplugins.processMonitor.listRunningProcesses()
        if len(runningProcesses) > 0:
            message += "\nThese processes are still running, and will be terminated when quitting: \n\n + " + \
                       "\n + ".join(runningProcesses) + "\n"
        if message:
            message += "\nQuit anyway?\n"
        return message
    def notifyWindowClosed(self, *args):
        # Window-manager close: show the confirmation dialog ourselves and
        # veto the close if the user backs out.
        confirmationMessage = self.getConfirmationMessage()
        if confirmationMessage:
            dialog = self.showQueryDialog(self.getParentWindow(), confirmationMessage,
                                          gtk.STOCK_DIALOG_WARNING, "Confirmation", None)
            responseId = dialog.run()
            saidCancel = responseId not in [ gtk.RESPONSE_ACCEPT, gtk.RESPONSE_YES, gtk.RESPONSE_OK ]
            dialog.hide()
            dialog.destroy()
            if saidCancel:
                self.cancel()
                raise guiplugins.CloseWindowCancelException("Closing window")
        self.notify("Quit")
class ResetGroups(guiplugins.BasicActionGUI):
    """Action resetting all running options back to their default values."""
    def isActiveOnCurrent(self, *args):
        # Always available, regardless of the current selection.
        return True
    def _getStockId(self):
        return "revert-to-saved"
    def _getTitle(self):
        return "R_eset"
    def messageAfterPerform(self):
        return "All options reset to default values."
    def getTooltip(self):
        return "Reset running options"
    def getSignalsSent(self):
        return [ "Reset" ]
    def performOnCurrent(self):
        self.notify("Reset")
class SetRunName(guiplugins.ActionDialogGUI):
    """Dialog action giving the current run a user-chosen name."""
    def __init__(self, *args):
        guiplugins.ActionDialogGUI.__init__(self, *args)
        self.addOption("name", "\nNew name for this run")
    def isActiveOnCurrent(self, *args):
        # Naming the run is always possible.
        return True
    def _getStockId(self):
        return "index"
    def _getTitle(self):
        return "Set Run Name"
    def messageAfterPerform(self):
        pass
    def getDialogTitle(self):
        return "Set a new name for this run"
    def getTooltip(self):
        return "Provide a name for this run and warn before closing it"
    def getSignalsSent(self):
        return [ "SetRunName" ]
    def performOnCurrent(self):
        newName = self.optionGroup.getOptionValue("name")
        self.notify("SetRunName", newName)
        self.notify("Status", "Set name of run to '%s'" % newName)
class RefreshAll(guiplugins.BasicActionGUI):
    """Re-read the whole test suite from disk so the GUI matches the files."""
    def __init__(self, *args):
        guiplugins.BasicActionGUI.__init__(self, *args)
        # Root suites are registered via addSuites as applications load.
        self.rootTestSuites = []
    def _getTitle(self):
        return "Refresh"
    def _getStockId(self):
        return "refresh"
    def getTooltip(self):
        return "Refresh the whole test suite so that it reflects file changes"
    def messageBeforePerform(self):
        return "Refreshing the whole test suite..."
    def messageAfterPerform(self):
        return "Refreshed the test suite from the files"
    def addSuites(self, suites):
        self.rootTestSuites += suites
    def notifyRefresh(self):
        # when done indirectly
        self.performOnCurrent()
    def performOnCurrent(self):
        for suite in self.rootTestSuites:
            self.notify("ActionProgress")
            # Re-read configuration before refreshing the tests themselves.
            suite.app.setUpConfiguration()
            self.notify("ActionProgress")
            filters = suite.app.getFilterList(self.rootTestSuites)
            suite.refresh(filters)
            suite.refreshFilesRecursively()
class ViewScreenshots(guiplugins.ActionGUI):
    """Open all screenshots of the selected test in the browser."""
    def _getTitle(self):
        return "View screenshots"
    def isActiveOnCurrent(self, *args):
        # Needs exactly one selected test that actually has screenshots.
        if len(self.currTestSelection) != 1:
            return False
        return os.path.isdir(self.getScreenshotDir())
    def getScreenshotDir(self):
        testDir = self.currTestSelection[0].getDirectory(temporary=True)
        return os.path.join(testDir, "screenshots")
    def performOnCurrent(self):
        shotDir = self.getScreenshotDir()
        fileNames = sorted(os.listdir(shotDir), key=self.getSortKey)
        fullPaths = [ os.path.join(shotDir, f) for f in fileNames ]
        guiplugins.openLinkInBrowser(*fullPaths)
    def getSortKey(self, fileName):
        # File names look like 'screenshotNN.ext': sort numerically on NN,
        # treating anything unparseable as 0.
        digits = fileName[10:-4]
        return int(digits) if digits.isdigit() else 0
class GenerateTestSummary(guiplugins.ActionDialogGUI):
    """Write a batch-style text summary of all completed tests to a file."""
    def __init__(self, *args):
        guiplugins.ActionDialogGUI.__init__(self, *args)
        # "generate" holds the destination file chosen in the dialog.
        self.addOption("generate", "",possibleDirs=[os.getenv("TEXTTEST_TMP", "")], saveFile=True)
        self.batchAppData = OrderedDict()
        self.allApps = OrderedDict()
    def performOnCurrent(self):
        fileName = self.getFileName()
        for test in self.currTestSelection:
            if test.state.isComplete():
                # FIX: dict.has_key was removed in Python 3; the 'in'
                # operator behaves identically on Python 2 and 3.
                if test.app not in self.batchAppData:
                    self.addApplication(test)
                self.batchAppData[test.app].storeCategory(test)
        self.writeTextSummary(fileName)
    def writeTextSummary(self, fileName):
        mailSender = MailSender()
        with open(fileName, "w") as f:
            for appList in self.allApps.values():
                # FIX: use a list comprehension rather than map() - on
                # Python 3 map() returns a lazy iterator, and makeContents
                # needs a real list.
                batchDataList = [ self.batchAppData.get(app) for app in appList ]
                f.write(mailSender.makeContents(batchDataList, False))
    def getFileName(self):
        """Return the chosen summary file name, raising if it is unusable."""
        fileName = self.optionGroup.getOptionValue("generate")
        # FIX: call-style raise works on both Python 2 and 3; the old
        # 'raise X, "msg"' form is a syntax error on Python 3.
        if not fileName:
            raise plugins.TextTestError("Cannot save selection - no file name specified")
        elif os.path.isdir(fileName):
            raise plugins.TextTestError("Cannot save selection - existing directory specified")
        else:
            return fileName
    def _getTitle(self):
        return "Generate test summary"
    def getRootSuite(self, test):
        # Walk up the parent chain to the suite that has no parent.
        if test.parent:
            return self.getRootSuite(test.parent)
        else:
            return test
    def addApplication(self, test):
        # Register a new application the first time one of its tests is seen.
        rootSuite = self.getRootSuite(test)
        app = test.app
        self.batchAppData[app] = BatchApplicationData(rootSuite)
        self.allApps.setdefault(app.name, []).append(app)
def getInteractiveActionClasses(dynamic):
    """Return the housekeeping action classes relevant to this GUI mode."""
    if dynamic:
        return [ Quit, SetRunName, ViewScreenshots, GenerateTestSummary ]
    else:
        return [ Quit, SetRunName, RefreshAll, ResetGroups ]
| |
import operator
from . import libmp
from .libmp.backend import basestring
from .libmp import (
int_types, MPZ_ONE,
prec_to_dps, dps_to_prec, repr_dps,
round_floor, round_ceiling,
fzero, finf, fninf, fnan,
mpf_le, mpf_neg,
from_int, from_float, from_str, from_rational,
mpi_mid, mpi_delta, mpi_str,
mpi_abs, mpi_pos, mpi_neg, mpi_add, mpi_sub,
mpi_mul, mpi_div, mpi_pow_int, mpi_pow,
mpi_from_str,
mpci_pos, mpci_neg, mpci_add, mpci_sub, mpci_mul, mpci_div, mpci_pow,
mpci_abs, mpci_pow, mpci_exp, mpci_log,
ComplexResult,
mpf_hash, mpc_hash)
from .matrices.matrices import _matrix
# The zero interval, reused as the imaginary part of real-valued intervals.
mpi_zero = (fzero, fzero)
from .ctx_base import StandardBaseContext
# Shortcut for allocating instances without running __init__.
new = object.__new__
def convert_mpf_(x, prec, rounding):
    """Convert x (mpf-like, int, float or string) to raw mpf tuple form."""
    if hasattr(x, "_mpf_"):
        return x._mpf_
    if isinstance(x, int_types):
        return from_int(x, prec, rounding)
    if isinstance(x, float):
        return from_float(x, prec, rounding)
    if isinstance(x, basestring):
        return from_str(x, prec, rounding)
    raise NotImplementedError
class ivmpf(object):
    """
    Interval arithmetic class. Precision is controlled by iv.prec.
    """

    def __new__(cls, x=0):
        return cls.ctx.convert(x)

    def cast(self, cls, f_convert):
        # Only a degenerate (zero-width) interval can be converted to a
        # point value; anything wider would silently lose information.
        a, b = self._mpi_
        if a == b:
            return cls(f_convert(a))
        raise ValueError

    def __int__(self):
        return self.cast(int, libmp.to_int)

    def __float__(self):
        return self.cast(float, libmp.to_float)

    def __complex__(self):
        return self.cast(complex, libmp.to_float)

    def __hash__(self):
        # Degenerate intervals hash like the mpf they represent, so that
        # values which compare equal across types also hash equal.
        a, b = self._mpi_
        if a == b:
            return mpf_hash(a)
        else:
            return hash(self._mpi_)

    @property
    def real(self): return self

    @property
    def imag(self): return self.ctx.zero

    def conjugate(self): return self

    @property
    def a(self):
        # Lower endpoint, returned as a degenerate interval.
        a, b = self._mpi_
        return self.ctx.make_mpf((a, a))

    @property
    def b(self):
        # Upper endpoint, returned as a degenerate interval.
        a, b = self._mpi_
        return self.ctx.make_mpf((b, b))

    @property
    def mid(self):
        ctx = self.ctx
        v = mpi_mid(self._mpi_, ctx.prec)
        return ctx.make_mpf((v, v))

    @property
    def delta(self):
        ctx = self.ctx
        v = mpi_delta(self._mpi_, ctx.prec)
        return ctx.make_mpf((v,v))

    @property
    def _mpci_(self):
        return self._mpi_, mpi_zero

    def _compare(*args):
        raise TypeError("no ordering relation is defined for intervals")

    # NOTE: these placeholder bindings are shadowed by the real interval
    # comparison methods defined further down, so they are dead code kept
    # for layout parity. FIX: the original bound __gt__ twice and never
    # bound __lt__; the set is now complete and consistent.
    __gt__ = _compare
    __le__ = _compare
    __lt__ = _compare
    __ge__ = _compare

    def __contains__(self, t):
        t = self.ctx.mpf(t)
        return (self.a <= t.a) and (t.b <= self.b)

    def __str__(self):
        return mpi_str(self._mpi_, self.ctx.prec)

    def __repr__(self):
        if self.ctx.pretty:
            return str(self)
        a, b = self._mpi_
        n = repr_dps(self.ctx.prec)
        a = libmp.to_str(a, n)
        b = libmp.to_str(b, n)
        return "mpi(%r, %r)" % (a, b)

    def _compare(s, t, cmpfun):
        # Coerce the other operand to an interval; if conversion fails,
        # return NotImplemented so Python can try the reflected operation.
        # FIX: narrowed the bare except (which also swallowed
        # KeyboardInterrupt/SystemExit) to Exception.
        if not hasattr(t, "_mpi_"):
            try:
                t = s.ctx.convert(t)
            except Exception:
                return NotImplemented
        return cmpfun(s._mpi_, t._mpi_)

    def __eq__(s, t): return s._compare(t, libmp.mpi_eq)
    def __ne__(s, t): return s._compare(t, libmp.mpi_ne)
    def __lt__(s, t): return s._compare(t, libmp.mpi_lt)
    def __le__(s, t): return s._compare(t, libmp.mpi_le)
    def __gt__(s, t): return s._compare(t, libmp.mpi_gt)
    def __ge__(s, t): return s._compare(t, libmp.mpi_ge)

    def __abs__(self):
        return self.ctx.make_mpf(mpi_abs(self._mpi_, self.ctx.prec))
    def __pos__(self):
        return self.ctx.make_mpf(mpi_pos(self._mpi_, self.ctx.prec))
    def __neg__(self):
        return self.ctx.make_mpf(mpi_neg(self._mpi_, self.ctx.prec))

    def ae(s, t, rel_eps=None, abs_eps=None):
        # "Almost equal" at the context's current tolerance.
        return s.ctx.almosteq(s, t, rel_eps, abs_eps)
class ivmpc(object):
    """Complex interval: a rectangle in the complex plane stored as a
    pair of real intervals, ``self._mpci_ = (real_mpi, imag_mpi)``."""
    def __new__(cls, re=0, im=0):
        re = cls.ctx.convert(re)
        im = cls.ctx.convert(im)
        y = new(cls)
        y._mpci_ = re._mpi_, im._mpi_
        return y
    def __hash__(self):
        # Exact (degenerate) rectangles hash like the matching mpc value.
        (a, b), (c,d) = self._mpci_
        if a == b and c == d:
            return mpc_hash((a, c))
        else:
            return hash(self._mpci_)
    def __repr__(s):
        if s.ctx.pretty:
            return str(s)
        return "iv.mpc(%s, %s)" % (repr(s.real), repr(s.imag))
    def __str__(s):
        return "(%s + %s*j)" % (str(s.real), str(s.imag))
    @property
    def a(self):
        # Lower real endpoint (as a zero-width real interval).
        (a, b), (c,d) = self._mpci_
        return self.ctx.make_mpf((a, a))
    @property
    def b(self):
        # Upper real endpoint.
        (a, b), (c,d) = self._mpci_
        return self.ctx.make_mpf((b, b))
    @property
    def c(self):
        # Lower imaginary endpoint.
        (a, b), (c,d) = self._mpci_
        return self.ctx.make_mpf((c, c))
    @property
    def d(self):
        # Upper imaginary endpoint.
        (a, b), (c,d) = self._mpci_
        return self.ctx.make_mpf((d, d))
    @property
    def real(s):
        return s.ctx.make_mpf(s._mpci_[0])
    @property
    def imag(s):
        return s.ctx.make_mpf(s._mpci_[1])
    def conjugate(s):
        a, b = s._mpci_
        return s.ctx.make_mpc((a, mpf_neg(b)))
    def overlap(s, t):
        # True when the two rectangles intersect in both coordinates.
        t = s.ctx.convert(t)
        real_overlap = (s.a <= t.a <= s.b) or (s.a <= t.b <= s.b) or (t.a <= s.a <= t.b) or (t.a <= s.b <= t.b)
        imag_overlap = (s.c <= t.c <= s.d) or (s.c <= t.d <= s.d) or (t.c <= s.c <= t.d) or (t.c <= s.d <= t.d)
        return real_overlap and imag_overlap
    def __contains__(s, t):
        t = s.ctx.convert(t)
        return t.real in s.real and t.imag in s.imag
    def _compare(s, t, ne=False):
        # NOTE(review): if convert() ever returned an object with
        # neither _mpi_ nor _mpci_, tval would be unbound (NameError);
        # convert() appears to guarantee one of the two -- confirm.
        if not isinstance(t, s.ctx._types):
            try:
                t = s.ctx.convert(t)
            except:
                return NotImplemented
        if hasattr(t, '_mpi_'):
            tval = t._mpi_, mpi_zero
        elif hasattr(t, '_mpci_'):
            tval = t._mpci_
        if ne:
            return s._mpci_ != tval
        return s._mpci_ == tval
    def __eq__(s, t): return s._compare(t)
    def __ne__(s, t): return s._compare(t, True)
    def __lt__(s, t): raise TypeError("complex intervals cannot be ordered")
    __le__ = __gt__ = __ge__ = __lt__
    def __neg__(s): return s.ctx.make_mpc(mpci_neg(s._mpci_, s.ctx.prec))
    def __pos__(s): return s.ctx.make_mpc(mpci_pos(s._mpci_, s.ctx.prec))
    def __abs__(s): return s.ctx.make_mpf(mpci_abs(s._mpci_, s.ctx.prec))
    def ae(s, t, rel_eps=None, abs_eps=None):
        return s.ctx.almosteq(s, t, rel_eps, abs_eps)
def _binary_op(f_real, f_complex):
    """Build the four operator methods (forward/reflected, for ivmpf and
    ivmpc) from a real interval function and its complex counterpart.

    *f_real* may signal via ComplexResult that the answer is complex
    (e.g. a power of a negative base); the operands are then lifted to
    complex intervals and the operation retried with *f_complex*.
    """
    def g_complex(ctx, sval, tval):
        return ctx.make_mpc(f_complex(sval, tval, ctx.prec))
    def g_real(ctx, sval, tval):
        try:
            return ctx.make_mpf(f_real(sval, tval, ctx.prec))
        except ComplexResult:
            # Lift both operands to complex intervals and retry.
            sval = (sval, mpi_zero)
            tval = (tval, mpi_zero)
            return g_complex(ctx, sval, tval)
    def lop_real(s, t):
        # Defer to the matrix class for scalar-matrix operations.
        if isinstance(t, _matrix): return NotImplemented
        ctx = s.ctx
        if not isinstance(t, ctx._types): t = ctx.convert(t)
        if hasattr(t, "_mpi_"): return g_real(ctx, s._mpi_, t._mpi_)
        if hasattr(t, "_mpci_"): return g_complex(ctx, (s._mpi_, mpi_zero), t._mpci_)
        return NotImplemented
    def rop_real(s, t):
        # Reflected variant: operand order swapped.
        ctx = s.ctx
        if not isinstance(t, ctx._types): t = ctx.convert(t)
        if hasattr(t, "_mpi_"): return g_real(ctx, t._mpi_, s._mpi_)
        if hasattr(t, "_mpci_"): return g_complex(ctx, t._mpci_, (s._mpi_, mpi_zero))
        return NotImplemented
    def lop_complex(s, t):
        if isinstance(t, _matrix): return NotImplemented
        ctx = s.ctx
        if not isinstance(t, s.ctx._types):
            try:
                t = s.ctx.convert(t)
            except (ValueError, TypeError):
                return NotImplemented
        return g_complex(ctx, s._mpci_, t._mpci_)
    def rop_complex(s, t):
        ctx = s.ctx
        if not isinstance(t, s.ctx._types):
            t = s.ctx.convert(t)
        return g_complex(ctx, t._mpci_, s._mpci_)
    return lop_real, rop_real, lop_complex, rop_complex
# Install the arithmetic operators on both interval types from the
# paired real/complex libmp interval routines.
ivmpf.__add__, ivmpf.__radd__, ivmpc.__add__, ivmpc.__radd__ = _binary_op(mpi_add, mpci_add)
ivmpf.__sub__, ivmpf.__rsub__, ivmpc.__sub__, ivmpc.__rsub__ = _binary_op(mpi_sub, mpci_sub)
ivmpf.__mul__, ivmpf.__rmul__, ivmpc.__mul__, ivmpc.__rmul__ = _binary_op(mpi_mul, mpci_mul)
ivmpf.__div__, ivmpf.__rdiv__, ivmpc.__div__, ivmpc.__rdiv__ = _binary_op(mpi_div, mpci_div)
ivmpf.__pow__, ivmpf.__rpow__, ivmpc.__pow__, ivmpc.__rpow__ = _binary_op(mpi_pow, mpci_pow)
# Python 3 uses __truediv__; alias it to the Python 2 __div__ slots.
ivmpf.__truediv__ = ivmpf.__div__; ivmpf.__rtruediv__ = ivmpf.__rdiv__
ivmpc.__truediv__ = ivmpc.__div__; ivmpc.__rtruediv__ = ivmpc.__rdiv__
class ivmpf_constant(ivmpf):
    """Lazily evaluated interval constant.

    Wraps a function ``f(prec, rounding)`` and computes the enclosing
    interval on every access of ``_mpi_``, using the context's current
    precision with the endpoints rounded outward.
    """
    def __new__(cls, f):
        inst = new(cls)
        inst._f = f
        return inst
    def _get_mpi_(self):
        wp = self.ctx._prec[0]
        lower = self._f(wp, round_floor)
        upper = self._f(wp, round_ceiling)
        return lower, upper
    _mpi_ = property(_get_mpi_)
class MPIntervalContext(StandardBaseContext):
    """mpmath context for real and complex interval arithmetic.

    Numbers are intervals whose endpoints are rounded outward, so each
    operation returns an enclosure of the true result.  The working
    precision is shared with the number types through the one-element
    mutable list ``ctx._prec``.
    """
    def __init__(ctx):
        # Create context-local subclasses so several contexts can coexist.
        ctx.mpf = type('ivmpf', (ivmpf,), {})
        ctx.mpc = type('ivmpc', (ivmpc,), {})
        ctx._types = (ctx.mpf, ctx.mpc)
        ctx._constant = type('ivmpf_constant', (ivmpf_constant,), {})
        ctx._prec = [53]
        ctx._set_prec(53)
        ctx._constant._ctxdata = ctx.mpf._ctxdata = ctx.mpc._ctxdata = [ctx.mpf, new, ctx._prec]
        ctx._constant.ctx = ctx.mpf.ctx = ctx.mpc.ctx = ctx
        ctx.pretty = False
        StandardBaseContext.__init__(ctx)
        ctx._init_builtins()
    def _mpi(ctx, a, b=None):
        # Build an interval from a scalar or from two endpoints.
        if b is None:
            return ctx.mpf(a)
        return ctx.mpf((a,b))
    def _init_builtins(ctx):
        # Constants and elementary functions exposed on the context.
        ctx.one = ctx.mpf(1)
        ctx.zero = ctx.mpf(0)
        ctx.inf = ctx.mpf('inf')
        ctx.ninf = -ctx.inf
        ctx.nan = ctx.mpf('nan')
        ctx.j = ctx.mpc(0,1)
        ctx.exp = ctx._wrap_mpi_function(libmp.mpi_exp, libmp.mpci_exp)
        ctx.sqrt = ctx._wrap_mpi_function(libmp.mpi_sqrt)
        ctx.ln = ctx._wrap_mpi_function(libmp.mpi_log, libmp.mpci_log)
        ctx.cos = ctx._wrap_mpi_function(libmp.mpi_cos, libmp.mpci_cos)
        ctx.sin = ctx._wrap_mpi_function(libmp.mpi_sin, libmp.mpci_sin)
        ctx.tan = ctx._wrap_mpi_function(libmp.mpi_tan)
        ctx.gamma = ctx._wrap_mpi_function(libmp.mpi_gamma, libmp.mpci_gamma)
        ctx.loggamma = ctx._wrap_mpi_function(libmp.mpi_loggamma, libmp.mpci_loggamma)
        ctx.rgamma = ctx._wrap_mpi_function(libmp.mpi_rgamma, libmp.mpci_rgamma)
        ctx.factorial = ctx._wrap_mpi_function(libmp.mpi_factorial, libmp.mpci_factorial)
        ctx.fac = ctx.factorial
        # Lazily evaluated constants, re-enclosed at the current precision.
        ctx.eps = ctx._constant(lambda prec, rnd: (0, MPZ_ONE, 1-prec, 1))
        ctx.pi = ctx._constant(libmp.mpf_pi)
        ctx.e = ctx._constant(libmp.mpf_e)
        ctx.ln2 = ctx._constant(libmp.mpf_ln2)
        ctx.ln10 = ctx._constant(libmp.mpf_ln10)
        ctx.phi = ctx._constant(libmp.mpf_phi)
        ctx.euler = ctx._constant(libmp.mpf_euler)
        ctx.catalan = ctx._constant(libmp.mpf_catalan)
        ctx.glaisher = ctx._constant(libmp.mpf_glaisher)
        ctx.khinchin = ctx._constant(libmp.mpf_khinchin)
        ctx.twinprime = ctx._constant(libmp.mpf_twinprime)
    def _wrap_mpi_function(ctx, f_real, f_complex=None):
        # Turn raw libmp interval routines into context-level functions.
        def g(x, **kwargs):
            if kwargs:
                prec = kwargs.get('prec', ctx._prec[0])
            else:
                prec = ctx._prec[0]
            x = ctx.convert(x)
            if hasattr(x, "_mpi_"):
                return ctx.make_mpf(f_real(x._mpi_, prec))
            if hasattr(x, "_mpci_"):
                return ctx.make_mpc(f_complex(x._mpci_, prec))
            raise ValueError
        return g
    @classmethod
    def _wrap_specfun(cls, name, f, wrap):
        # Install a special function; optionally add 10 bits of guard
        # precision around the call.
        if wrap:
            def f_wrapped(ctx, *args, **kwargs):
                convert = ctx.convert
                args = [convert(a) for a in args]
                prec = ctx.prec
                try:
                    ctx.prec += 10
                    retval = f(ctx, *args, **kwargs)
                finally:
                    ctx.prec = prec
                return +retval
        else:
            f_wrapped = f
        setattr(cls, name, f_wrapped)
    def _set_prec(ctx, n):
        ctx._prec[0] = max(1, int(n))
        ctx._dps = prec_to_dps(n)
    def _set_dps(ctx, n):
        ctx._prec[0] = dps_to_prec(n)
        ctx._dps = max(1, int(n))
    prec = property(lambda ctx: ctx._prec[0], _set_prec)
    dps = property(lambda ctx: ctx._dps, _set_dps)
    def make_mpf(ctx, v):
        # Wrap a raw (a, b) endpoint pair without conversion checks.
        a = new(ctx.mpf)
        a._mpi_ = v
        return a
    def make_mpc(ctx, v):
        a = new(ctx.mpc)
        a._mpci_ = v
        return a
    def _mpq(ctx, pq):
        # Enclose the rational p/q by rounding the endpoints outward.
        p, q = pq
        a = libmp.from_rational(p, q, ctx.prec, round_floor)
        b = libmp.from_rational(p, q, ctx.prec, round_ceiling)
        return ctx.make_mpf((a, b))
    def convert(ctx, x):
        """Convert *x* (number, string, or endpoint pair) to an interval
        of this context."""
        if isinstance(x, (ctx.mpf, ctx.mpc)):
            return x
        if isinstance(x, ctx._constant):
            return +x
        if isinstance(x, complex) or hasattr(x, "_mpc_"):
            re = ctx.convert(x.real)
            im = ctx.convert(x.imag)
            return ctx.mpc(re,im)
        if isinstance(x, basestring):
            v = mpi_from_str(x, ctx.prec)
            return ctx.make_mpf(v)
        if hasattr(x, "_mpi_"):
            a, b = x._mpi_
        else:
            try:
                a, b = x
            except (TypeError, ValueError):
                a = b = x
            if hasattr(a, "_mpi_"):
                a = a._mpi_[0]
            else:
                a = convert_mpf_(a, ctx.prec, round_floor)
            if hasattr(b, "_mpi_"):
                b = b._mpi_[1]
            else:
                b = convert_mpf_(b, ctx.prec, round_ceiling)
        # NaN endpoints widen to the whole real line.
        if a == fnan or b == fnan:
            a = fninf
            b = finf
        assert mpf_le(a, b), "endpoints must be properly ordered"
        return ctx.make_mpf((a, b))
    def nstr(ctx, x, n=5, **kwargs):
        x = ctx.convert(x)
        if hasattr(x, "_mpi_"):
            return libmp.mpi_to_str(x._mpi_, n, **kwargs)
        if hasattr(x, "_mpci_"):
            re = libmp.mpi_to_str(x._mpci_[0], n, **kwargs)
            im = libmp.mpi_to_str(x._mpci_[1], n, **kwargs)
            return "(%s + %s*j)" % (re, im)
    def mag(ctx, x):
        # Magnitude bound derived from the largest absolute endpoint.
        x = ctx.convert(x)
        if isinstance(x, ctx.mpc):
            return max(ctx.mag(x.real), ctx.mag(x.imag)) + 1
        a, b = libmp.mpi_abs(x._mpi_)
        sign, man, exp, bc = b
        if man:
            return exp+bc
        if b == fzero:
            return ctx.ninf
        if b == fnan:
            return ctx.nan
        return ctx.inf
    def isnan(ctx, x):
        # NaN endpoints are converted to [-inf, inf] in convert().
        return False
    def isinf(ctx, x):
        return x == ctx.inf
    def isint(ctx, x):
        # True/False for exact intervals; None when the interval has width.
        x = ctx.convert(x)
        a, b = x._mpi_
        if a == b:
            sign, man, exp, bc = a
            if man:
                return exp >= 0
            return a == fzero
        return None
    def ldexp(ctx, x, n):
        # Exact scaling of both endpoints by 2**n.
        a, b = ctx.convert(x)._mpi_
        a = libmp.mpf_shift(a, n)
        b = libmp.mpf_shift(b, n)
        return ctx.make_mpf((a,b))
    def absmin(ctx, x):
        return abs(ctx.convert(x)).a
    def absmax(ctx, x):
        return abs(ctx.convert(x)).b
    def atan2(ctx, y, x):
        y = ctx.convert(y)._mpi_
        x = ctx.convert(x)._mpi_
        return ctx.make_mpf(libmp.mpi_atan2(y,x,ctx.prec))
    def _convert_param(ctx, x):
        # Classify a hypergeometric parameter as integer/real/complex.
        if isinstance(x, libmp.int_types):
            return x, 'Z'
        if isinstance(x, tuple):
            p, q = x
            return (ctx.mpf(p) / ctx.mpf(q), 'R')
        x = ctx.convert(x)
        if isinstance(x, ctx.mpf):
            return x, 'R'
        if isinstance(x, ctx.mpc):
            return x, 'C'
        raise ValueError
    def _is_real_type(ctx, z):
        return isinstance(z, ctx.mpf) or isinstance(z, int_types)
    def _is_complex_type(ctx, z):
        return isinstance(z, ctx.mpc)
    def hypsum(ctx, p, q, types, coeffs, z, maxterms=6000, **kwargs):
        # Direct summation of the hypergeometric series; stops only when
        # a term is exactly zero (an interval-safe stopping criterion).
        coeffs = list(coeffs)
        num = range(p)
        den = range(p,p+q)
        #tol = ctx.eps
        s = t = ctx.one
        k = 0
        while 1:
            for i in num: t *= (coeffs[i]+k)
            for i in den: t /= (coeffs[i]+k)
            k += 1; t /= k; t *= z; s += t
            if t == 0:
                return s
            #if abs(t) < tol:
            #    return s
            if k > maxterms:
                raise ctx.NoConvergence
# Register with "numbers" ABC
# We do not subclass, hence we do not use the @abstractmethod checks. While
# this is less invasive it may turn out that we do not actually support
# parts of the expected interfaces. See
# http://docs.python.org/2/library/numbers.html for list of abstract
# methods.
# The ImportError guard tolerates Pythons without the numbers module.
try:
    import numbers
    numbers.Complex.register(ivmpc)
    numbers.Real.register(ivmpf)
except ImportError:
    pass
| |
#!/usr/bin/env python
# coding: utf-8
"""
A pure python ping implementation using raw sockets.
Note that ICMP messages can only be send from processes running as root
(in Windows, you must run this script as 'Administrator').
Bugs are naturally mine. I'd be glad to hear about them. There are
certainly word - size dependencies here.
:homepage: https://github.com/jedie/python-ping/
:copyleft: 1989-2011 by the python-ping team, see AUTHORS for more details.
:license: GNU GPL v2, see LICENSE for more details.
"""
import os, sys, socket, struct, select, time, signal
# Select the wall-clock timer used for round-trip measurements.
if sys.platform == "win32":
    # On Windows, the best timer is time.clock()
    # NOTE(review): time.clock was removed in Python 3.8 -- fine for the
    # Python 2 target of this script, would need time.perf_counter on 3.x.
    default_timer = time.clock
else:
    # On most other platforms the best timer is time.time()
    default_timer = time.time
# ICMP parameters
ICMP_ECHOREPLY = 0 # Echo reply (per RFC792)
ICMP_ECHO = 8 # Echo request (per RFC792)
ICMP_MAX_RECV = 2048 # Max size of incoming buffer
MAX_SLEEP = 1000 # Per-iteration pause budget in ms (see Ping.run)
def calculate_checksum(source_string):
    """
    A port of the functionality of in_cksum() from ping.c: the 16-bit
    one's-complement Internet checksum (RFC 1071) of *source_string*.

    Ideally this would act on the string as a series of 16-bit ints
    (host packed), but this works.  Network data is big-endian, hosts
    are typically little-endian.

    BUGFIX/generalization: accepts Python 2 str as well as Python 3
    bytes/bytearray.  The original indexed the string and called ord(),
    which breaks on Python 3 where indexing bytes yields an int.
    """
    # Normalize to a bytearray so indexing always yields small ints
    # (works for py2 str, py3 bytes/bytearray, and text strings).
    if isinstance(source_string, (bytes, bytearray)):
        data = bytearray(source_string)
    else:
        data = bytearray(ord(ch) for ch in source_string)
    count_to = (len(data) // 2) * 2
    csum = 0  # renamed from 'sum' to avoid shadowing the builtin
    count = 0
    # Handle bytes in pairs (decoding as host-order 16-bit ints).
    while count < count_to:
        if sys.byteorder == "little":
            lo_byte = data[count]
            hi_byte = data[count + 1]
        else:
            lo_byte = data[count + 1]
            hi_byte = data[count]
        csum = csum + (hi_byte * 256 + lo_byte)
        count += 2
    # Handle last byte if applicable (odd number of bytes);
    # endianness is irrelevant for a single byte.
    if count_to < len(data):
        csum += data[len(data) - 1]
    # Truncate to 32 bits (a variance from ping.c, which uses signed
    # ints, but overflow is unlikely in ping).
    csum &= 0xffffffff
    csum = (csum >> 16) + (csum & 0xffff)  # Add high 16 bits to low 16 bits
    csum += (csum >> 16)                   # Add carry from above (if any)
    answer = ~csum & 0xffff                # Invert and truncate to 16 bits
    answer = socket.htons(answer)
    return answer
class HeaderInformation(dict):
    """Simple storage for received IP and ICMP header information.

    Unpacks *data* according to *struct_format* and stores the resulting
    values as a dict keyed by *names* (in order).
    """
    def __init__(self, names, struct_format, data):
        values = struct.unpack(struct_format, data)
        dict.__init__(self, zip(names, values))
class Ping(object):
    """Send ICMP echo requests to *destination* and gather statistics.

    *timeout* is in milliseconds, *packet_size* is the ICMP payload size
    in bytes, *own_id* tags our packets (defaults to the PID).  Raw
    sockets require root/Administrator privileges.
    NOTE(review): several print_*/signal methods begin with a bare
    ``return`` -- output and signal handling were apparently disabled on
    purpose; the code after those returns is dead.
    """
    def __init__(self, destination, timeout=1000, packet_size=55, own_id=None):
        self.destination = destination
        self.timeout = timeout
        self.packet_size = packet_size
        if own_id is None:
            # The ICMP id field is 16 bits wide, hence the mask.
            self.own_id = os.getpid() & 0xFFFF
        else:
            self.own_id = own_id
        try:
            # FIXME: Use destination only for display this line here? see: https://github.com/jedie/python-ping/issues/3
            self.dest_ip = socket.gethostbyname(self.destination)
        except socket.gaierror as e:
            self.print_unknown_host(e)
            sys.exit(-1)
        else:
            self.print_start()
        self.seq_number = 0
        self.send_count = 0
        self.receive_count = 0
        self.min_time = 999999999
        self.max_time = 0.0
        self.total_time = 0.0
        self.delays = []
    #--------------------------------------------------------------------------
    def init(self):
        # Reset all statistics so the instance can be reused.
        self.seq_number = 0
        self.send_count = 0
        self.receive_count = 0
        self.min_time = 999999999
        self.max_time = 0.0
        self.total_time = 0.0
        self.delays = []
    def print_start(self):
        print("\nPYTHON-PING %s (%s): %d data bytes" % (self.destination, self.dest_ip, self.packet_size))
    def print_unknown_host(self, e):
        print("\nPYTHON-PING: Unknown host: %s (%s)\n" % (self.destination, e.args[1]))
    def print_success(self, delay, ip, packet_size, ip_header, icmp_header):
        # NOTE(review): early return disables the report below (dead code).
        return
        if ip == self.destination:
            from_info = ip
        else:
            from_info = "%s (%s)" % (self.destination, ip)
        print("%d bytes from %s: icmp_seq=%d ttl=%d time=%.1f ms" % (
            packet_size, from_info, icmp_header["seq_number"], ip_header["ttl"], delay)
        )
        #print("IP header: %r" % ip_header)
        #print("ICMP header: %r" % icmp_header)
    def print_failed(self):
        print("Request timed out.")
    def print_exit(self):
        # NOTE(review): early return disables the summary below (dead code).
        return
        print("\n----%s PYTHON PING Statistics----" % (self.destination))
        lost_count = self.send_count - self.receive_count
        #print("%i packets lost" % lost_count)
        lost_rate = float(lost_count) / self.send_count * 100.0
        print("%d packets transmitted, %d packets received, %0.1f%% packet loss" % (
            self.send_count, self.receive_count, lost_rate
        ))
        if self.receive_count > 0:
            print("round-trip (ms) min/avg/max = %0.3f/%0.3f/%0.3f" % (
                self.min_time, self.total_time / self.receive_count, self.max_time
            ))
        print("")
    #--------------------------------------------------------------------------
    def signal_handler(self, signum, frame):
        # NOTE(review): early return makes this handler a no-op; the
        # triple-quoted string below is unreachable (not a docstring).
        return
        """
        Handle print_exit via signals
        """
        self.print_exit()
        print("\n(Terminated with signal %d)\n" % (signum))
        sys.exit(0)
    def setup_signal_handler(self):
        # NOTE(review): early return disables signal installation.
        return
        signal.signal(signal.SIGINT, self.signal_handler) # Handle Ctrl-C
        if hasattr(signal, "SIGBREAK"):
            # Handle Ctrl-Break e.g. under Windows
            signal.signal(signal.SIGBREAK, self.signal_handler)
    #--------------------------------------------------------------------------
    def run(self, count=None, deadline=None):
        """
        send and receive pings in a loop. Stop if count or until deadline.
        """
        self.setup_signal_handler()
        while True:
            delay = self.do()
            self.seq_number += 1
            # NOTE(review): the breaks fire before the final iteration's
            # delay is appended to self.delays.
            if count and self.seq_number >= count:
                break
            if deadline and self.total_time >= deadline:
                break
            if delay == None:
                delay = 0
            else:
                self.delays.append(delay)
            # Pause for the remainder of the MAX_SLEEP period (if applicable)
            if (MAX_SLEEP > delay):
                time.sleep((MAX_SLEEP - delay) / 1000.0)
        self.print_exit()
    def getAverageDelay(self):
        # NOTE(review): raises ZeroDivisionError if no delay was recorded.
        return sum(self.delays)/len(self.delays)
    def do(self):
        """
        Send one ICMP ECHO_REQUEST and receive the response until self.timeout
        """
        try: # One could use UDP here, but it's obscure
            current_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.getprotobyname("icmp"))
        except socket.error, (errno, msg):
            if errno == 1:
                # Operation not permitted - Add more information to traceback
                etype, evalue, etb = sys.exc_info()
                evalue = etype(
                    "%s - Note that ICMP messages can only be send from processes running as root." % evalue
                )
                raise etype, evalue, etb
            raise # raise the original error
        send_time = self.send_one_ping(current_socket)
        if send_time == None:
            return
        self.send_count += 1
        receive_time, packet_size, ip, ip_header, icmp_header = self.receive_one_ping(current_socket)
        current_socket.close()
        if receive_time:
            self.receive_count += 1
            delay = (receive_time - send_time) * 1000.0
            self.total_time += delay
            if self.min_time > delay:
                self.min_time = delay
            if self.max_time < delay:
                self.max_time = delay
            self.print_success(delay, ip, packet_size, ip_header, icmp_header)
            return delay
        else:
            self.print_failed()
    def send_one_ping(self, current_socket):
        """
        Send one ICMP ECHO_REQUEST
        """
        # Header is type (8), code (8), checksum (16), id (16), sequence (16)
        checksum = 0
        # Make a dummy header with a 0 checksum.
        header = struct.pack(
            "!BBHHH", ICMP_ECHO, 0, checksum, self.own_id, self.seq_number
        )
        padBytes = []
        startVal = 0x42
        for i in range(startVal, startVal + (self.packet_size)):
            padBytes += [(i & 0xff)] # Keep chars in the 0-255 range
        data = bytes(padBytes)
        # Calculate the checksum on the data and the dummy header.
        checksum = calculate_checksum(header + data) # Checksum is in network order
        # Now that we have the right checksum, we put that in. It's just easier
        # to make up a new header than to stuff it into the dummy.
        header = struct.pack(
            "!BBHHH", ICMP_ECHO, 0, checksum, self.own_id, self.seq_number
        )
        packet = header + data
        send_time = default_timer()
        try:
            current_socket.sendto(packet, (self.destination, 1)) # Port number is irrelevant for ICMP
        except socket.error as e:
            print("General failure (%s)" % (e.args[1]))
            current_socket.close()
            return
        return send_time
    def receive_one_ping(self, current_socket):
        """
        Receive the ping from the socket. timeout = in ms
        """
        timeout = self.timeout / 1000.0
        while True: # Loop while waiting for packet or timeout
            select_start = default_timer()
            inputready, outputready, exceptready = select.select([current_socket], [], [], timeout)
            select_duration = (default_timer() - select_start)
            if inputready == []: # timeout
                return None, 0, 0, 0, 0
            receive_time = default_timer()
            packet_data, address = current_socket.recvfrom(ICMP_MAX_RECV)
            # ICMP header sits after the 20-byte IP header.
            icmp_header = HeaderInformation(
                names=[
                    "type", "code", "checksum",
                    "packet_id", "seq_number"
                ],
                struct_format="!BBHHH",
                data=packet_data[20:28]
            )
            if icmp_header["packet_id"] == self.own_id: # Our packet
                ip_header = HeaderInformation(
                    names=[
                        "version", "type", "length",
                        "id", "flags", "ttl", "protocol",
                        "checksum", "src_ip", "dest_ip"
                    ],
                    struct_format="!BBHHHBBHII",
                    data=packet_data[:20]
                )
                packet_size = len(packet_data) - 28
                ip = socket.inet_ntoa(struct.pack("!I", ip_header["src_ip"]))
                # XXX: Why not ip = address[0] ???
                return receive_time, packet_size, ip, ip_header, icmp_header
            # Not our packet: keep waiting for whatever time remains.
            timeout = timeout - select_duration
            if timeout <= 0:
                return None, 0, 0, 0, 0
def verbose_ping(hostname, timeout=1000, count=3, packet_size=55):
    """Convenience wrapper: ping *hostname* *count* times.

    *timeout* is per-reply in milliseconds; *packet_size* is the ICMP
    payload size in bytes.
    """
    pinger = Ping(hostname, timeout, packet_size)
    pinger.run(count)
if __name__ == '__main__':
    # FIXME: Add a real CLI
    # Self-test demo (Python 2 print statements; run as root).
    if len(sys.argv) == 1:
        print "DEMO"
        # These should work:
        verbose_ping("heise.de")
        verbose_ping("google.com")
        # Inconsistent on Windows w/ ActivePython (Python 3.2 resolves correctly
        # to the local host, but 2.7 tries to resolve to the local *gateway*)
        verbose_ping("localhost")
        # Should fail with 'getaddrinfo print_failed':
        verbose_ping("foobar_url.foobar")
        # Should fail (timeout), but it depends on the local network:
        verbose_ping("192.168.255.254")
        # Should fails with 'The requested address is not valid in its context':
        verbose_ping("0.0.0.0")
    elif len(sys.argv) == 2:
        verbose_ping(sys.argv[1])
    else:
        print "Error: call ./ping.py domain.tld"
| |
# -*- coding: utf-8 -*-
"""
##################################################################################
# PipeFFmpeg v0.1.1
#
# Copyright (C) 2011 KATO Kanryu <k.kanryu@gmail.com>
#
##################################################################################
# This file is distibuted under 3-BSD
# See COPYING file attached.
##################################################################################
#
# TODO:
# * transcode to other codec which has ffmpeg-command
# * support audio-stream
#
# Abilities
# * Get version from ffmpeg-command on your system
# * Get codecs
# * Get formats
# * Get pix_fmts
# * Get metadata from a video file
# * Read frames from a video
# * Write a video within post frames in Python
"""
import subprocess as sp
import os
import sys
import ctypes
FFMPEG_BIN = 'ffmpeg'
"""ffmpeg's path
if ffmpeg command doesn't exist in PATHs, you should change this."""
FFMPEG_DETECTED = False
"""FFMPEG_BIN is detected?
detecting is only 1 time.
"""
FFPROBE_BIN = 'ffprobe'
"""ffprobe's path"""
FFPROBE_DETECTED = False # set True once the ffprobe probe has been run
FFPROBE_EXISTS = False # result of that probe: is ffprobe runnable?
def _attempt_bin(bin):
global FFMPEG_DETECTED
if FFMPEG_DETECTED: return
try:
p = sp.Popen(
bin,
stdin=sp.PIPE,
stdout=sp.PIPE,
stderr=sp.PIPE,
)
del p
return True
except EnvironmentError:
return False
def _attempt_ffmpeg():
    """Verify once that FFMPEG_BIN can be launched; raise otherwise.

    Caches the result in the module-level FFMPEG_DETECTED flag so the
    process spawn happens only on the first call.
    """
    global FFMPEG_DETECTED
    if FFMPEG_DETECTED: return
    try:
        p = sp.Popen(
            FFMPEG_BIN,
            stdin=sp.PIPE,
            stdout=sp.PIPE,
            stderr=sp.PIPE,
        )
        del p
        FFMPEG_DETECTED = True
    except EnvironmentError:
        # Python 2 print statement (this module targets Python 2).
        print "pyffmpeg: you should set pyffmpeg.FFMPEG_BIN as a valid 'ffmpeg' command path"
        raise
def _attempt_ffprobe():
    """Probe once for the ffprobe binary and cache the outcome in the
    module-level FFPROBE_DETECTED / FFPROBE_EXISTS flags."""
    global FFPROBE_DETECTED, FFPROBE_EXISTS
    if FFPROBE_DETECTED:
        return
    if _attempt_bin(FFPROBE_BIN):
        FFPROBE_EXISTS = True
    FFPROBE_DETECTED = True
def get_pipe2(bin=FFMPEG_BIN, option=None):
    '''Spawn *bin* (ffmpeg by default) and return the process with
    stdin/stdout pipes attached.  *option* may be a single argument
    string or a list of arguments.'''
    _attempt_ffmpeg()
    command = [bin]
    if option:
        if type(option) == str:
            command.append(option)
        if type(option) == list:
            command = command + option
    return sp.Popen(
        command,
        stdin=sp.PIPE,
        stdout=sp.PIPE,
    )
def get_pipe3(option=None):
    '''Spawn ffmpeg and return the process with stdin/stdout/stderr
    pipes attached.  *option* may be a single argument string or a list
    of arguments.'''
    _attempt_ffmpeg()
    command = [FFMPEG_BIN]
    if option:
        if type(option) == str:
            command.append(option)
        if type(option) == list:
            command = command + option
    return sp.Popen(
        command,
        stdin=sp.PIPE,
        stdout=sp.PIPE,
        stderr=sp.PIPE,
    )
def _plugins_gen(option, sep=' ------', stdpipe='stderr'):
    """Yield the listing lines that ffmpeg prints for *option*.

    Lines up to and including the first one starting with *sep* (the
    banner) are skipped; iteration stops at the first empty line.
    *stdpipe* names which pipe of the child process to read from.
    """
    p = get_pipe3(option)
    first_skip = True
    if stdpipe == 'stdin': stdpipe = p.stdin
    if stdpipe == 'stdout': stdpipe = p.stdout
    if stdpipe == 'stderr': stdpipe = p.stderr
    for line in stdpipe.readlines():
        line = line.rstrip()
        if first_skip:
            # Still inside the banner: skip until the separator line.
            if line[:len(sep)] == sep: first_skip = False
            continue
        if line == '': break
        yield line
    del p
class Codec:
    '''One codec entry parsed from the fixed-column "ffmpeg -codecs"
    listing (video/audio/subtitle codecs supported by ffmpeg).'''
    types = {'V': 'video', 'A': 'audio', 'S': 'subtitle', 'D': 'binary_data'}
    def __init__(self, line):
        # Columns 1-6 are single-character capability flags; the codec
        # name starts at column 8.
        flags = line[:8]
        self.decoding = flags[1] == 'D'
        self.encoding = flags[2] == 'E'
        self.type = Codec.types[flags[3]]
        self.draw_horiz_band = flags[4] == 'S'
        self.direct_rendering = flags[5] == 'D'
        self.frame_truncation = flags[6] == 'T'
        self.name = line[8:]
    def __repr__(self):
        return '<Codec %s for %s>' % (self.name, self.type)
def get_codecs():
    '''get codecs for ffmpeg

    Returns a dict mapping codec name -> Codec, parsed from the
    fixed-column output of "ffmpeg -codecs".
    BUGFIX: removed a leftover debug print of every listing line (a
    Python 2 print statement, which also broke Python 3 parsing of
    this module).
    '''
    result = {}
    for line in _plugins_gen('-codecs', sep=' ------', stdpipe='stdout'):
        result[line[8:]] = Codec(line)
    return result
class Format:
    '''One container format entry from the "ffmpeg -formats" listing:
    column 1 flags demuxing support, column 2 muxing support, and the
    format name starts at column 4.'''
    def __init__(self, line):
        self.demuxing = line[1] == 'D'
        self.muxing = line[2] == 'E'
        self.name = line[4:]
    def __repr__(self):
        flags = ('D' if self.demuxing else '') + ('E' if self.muxing else '')
        return '<Format %s %s>' % (self.name, flags)
def get_formats():
    '''Return a dict mapping format name -> Format, parsed from the
    output of "ffmpeg -formats".'''
    formats = {}
    for entry in _plugins_gen('-formats', sep=' --', stdpipe='stdout'):
        formats[entry[4:]] = Format(entry)
    return formats
class PixelFormat:
    '''One pixel-format entry from the "ffmpeg -pix_fmts" listing:
    capability flag columns, then name, component count and bits per
    pixel from column 8 on.'''
    def __init__(self, line):
        self.input = line[0] == 'I'
        self.output = line[1] == 'O'
        self.hardware = line[2] == 'H'
        self.paletted = line[3] == 'P'
        self.bitstream = line[5] == 'B'
        fields = [tok for tok in line[8:].split(' ') if tok]
        self.name = fields[0]
        self.components = int(fields[1])
        self.bpp = int(fields[2])
    def __repr__(self):
        io = ('I' if self.input else '.') + ('O' if self.output else '.')
        return '<PixelFormat %s %s %d %d>' % (self.name, io, self.components, self.bpp)
def get_pixel_formats():
    '''Return a dict mapping pixel-format name -> PixelFormat, parsed
    from the output of "ffmpeg -pix_fmts".'''
    formats = {}
    for entry in _plugins_gen('-pix_fmts', sep='-----', stdpipe='stdout'):
        parsed = PixelFormat(entry)
        formats[parsed.name] = parsed
    return formats
def get_ffmpeg_version():
    """Return the versions of ffmpeg and its lib** components as a dict
    {component_name: version_string}, parsed from "ffmpeg -version"
    output, e.g.:
        FFmpeg SVN-r26400
        libavutil     50.36. 0 / 50.36. 0
        libavcodec    52.108. 0 / 52.108. 0
        ...
    """
    pipe = get_pipe3('-version')
    versions = {}
    for line in pipe.stdout.readlines():
        line = line.rstrip()
        # Component name runs up to the first space; the rest is the version.
        cut = line.find(' ')
        versions[line[:cut]] = line[cut:].lstrip()
    del pipe
    return versions
def get_ffmpeg_info():
    """get information about ffmpeg (including versions)

    Parses the fixed-column banner ffmpeg prints to stderr when run
    without arguments, e.g.:
      FFmpeg version SVN-r26400, Copyright (c) 2000-2011 the FFmpeg developers
        built on Jan 17 2011 22:59:06 with gcc 4.5.2
        configuration: --enable-memalign-hack --enable-gpl ...
        libavutil     50.36. 0 / 50.36. 0
        libavcodec    52.108. 0 / 52.108. 0
        ...
    Returns a dict with keys 'FFmpeg', 'built', 'configuration' and one
    entry per lib* component.
    """
    p = get_pipe3()
    result = {}
    for line in p.stderr.readlines():
        # The column offsets below match the banner layout shown above.
        if line[:6] == 'FFmpeg':
            result['FFmpeg'] = line[15:line.find(',')]
            continue
        if line[2:2+5] == 'built':
            result['built'] = line[11:].rstrip()
            continue
        if line[2:15] == 'configuration':
            result['configuration'] = line[17:].rstrip()
            continue
        if line[2:5] == 'lib':
            line = line[2:].rstrip()
            idx = line.find(' ')
            name = line[:idx]
            result[name] = line[idx:].lstrip()
            continue
    del p
    return result
def get_info(path_of_video):
    """get information of the video for ffmpeg

    Prefers "ffprobe -show_format -show_streams" when ffprobe is
    available, returning a flat dict of format keys plus a 'streams'
    list of per-stream dicts (keys straight from ffprobe's key=value
    output).  Otherwise falls back to parsing the banner printed by
    "ffmpeg -i <file>" and returns dict(metadata=..., duration=...),
    where duration['streams'] holds one dict per Video/Audio stream
    (codec, bitrate, size/fps/tb* for video; Hz/ch/smp_fmt for audio).
    """
    global FFMPEG_DETECTED, FFPROBE_EXISTS
    _attempt_ffprobe()
    if FFPROBE_EXISTS:
        p = sp.Popen(
            [FFPROBE_BIN, '-show_format', '-show_streams', path_of_video],
            stdin=sp.PIPE,
            stdout=sp.PIPE,
            stderr=sp.PIPE,
        )
        # result = []
        result = dict(streams=[])
        stream = {}
        is_stream = is_format = False
        # ffprobe output is linewise key=value, sectioned by [STREAM]
        # and [FORMAT] markers.
        for line in p.stdout.readlines():
            #result.append(line)
            line = line.strip()
            if line == '[STREAM]':
                is_stream = True
                continue
            if line == '[/STREAM]':
                is_stream = False
                result['streams'].append(stream)
                stream = {}
                continue
            if line == '[FORMAT]':
                is_format = True
                continue
            if line == '[/FORMAT]':
                break
            tokens = line.split('=')
            if is_stream: stream[tokens[0]] = tokens[1]
            if is_format: result[tokens[0]] = tokens[1]
        print result
        return result
    result = {}
    at_metadata = True
    metadata = {}
    duration = {}
    for line in _plugins_gen(['-i', '"%s"' % path_of_video], sep=' libpostproc'):
        line = line.lstrip()
        if line[:5] == 'Input': continue
        if line == 'Metadata:': continue
        tokens = line.lstrip().split(': ')
        if at_metadata:
            # 'key : value' lines until the Duration line is reached.
            if tokens[0] != 'Duration':
                metadata[tokens[0].rstrip()] = tokens[1]
                continue
            at_metadata = False
        if tokens[0] == 'Duration':
            duration['duration'] = tokens[1][:-7]
            duration['start'] = tokens[2][:-9]
            duration['bitrate'] = tokens[3]
            duration['streams'] = [] # Video.. Audio..
        if line[:6] == 'Stream':
            stream = {}
            submeta = tokens[2].split(', ')
            stream['raw'] = [tokens[1]] + submeta # ['Video', 'h264', 'yuv420p', '352x240', '486 kb/s', '30 fps', '30 tbr', '30 tbn', '60 tbc']
            stream['type'] = tokens[1] # Video
            if stream['type'] == 'Audio':
                stream['codec'] = submeta[0] # aac
                stream['Hz'] = submeta[1] # 48000 Hz
                stream['ch'] = submeta[2] # stereo
                stream['smp_fmt'] = submeta[3] # s16
                stream['bitrate'] = submeta[4] # 88 kb/s
            else:
                stream['codec'] = submeta[0] # h264
                stream['pix_fmt'] = submeta[1] # yuv420p
                stream['size'] = submeta[2] # 352x240
                stream['bitrate'] = submeta[3] # 486 kb/s
                stream['fps'] = submeta[4] # 30 fps
                stream['tbr'] = submeta[5] # 30 tbr
                stream['tbn'] = submeta[6] # 30 tbn
                stream['tbc'] = submeta[7] # 60 tbc
            duration['streams'].append(stream)
    return dict(metadata=metadata, duration=duration)
class BitmapFileHeader(ctypes.LittleEndianStructure):
    """ctypes mirror of the 14-byte BITMAPFILEHEADER that prefixes a .bmp file.

    Declared little-endian and 2-byte packed so the in-memory layout matches
    the on-disk layout exactly (default alignment would pad after bfType and
    shift every later field).
    """
    _pack_ = 2  # BMP file headers are 2-byte aligned on disk
    _fields_ = [
        ('bfType', ctypes.c_int16),     # magic number, 0x4d42 == 'BM'
        ('bfSize', ctypes.c_int32),     # total size of the BMP file in bytes
        ('bfRsv1', ctypes.c_int16),     # reserved, normally 0
        ('bfRsv2', ctypes.c_int16),     # reserved, normally 0
        ('bfOffBits', ctypes.c_int32),  # offset from file start to pixel data
    ]
def sread(fd, cobj):
    """Fill the ctypes object *cobj* in place from binary stream *fd*.

    Reads up to ``sizeof(cobj)`` bytes and copies only what the stream
    actually returned. The previous implementation always memmoved
    ``sizeof(cobj)`` bytes regardless of how much ``fd.read`` delivered,
    so a short read (e.g. at EOF) copied past the end of the source
    buffer — undefined behavior.

    Args:
        fd: a binary file-like object with a ``read(n)`` method.
        cobj: any ctypes instance to overwrite.

    Returns:
        The number of bytes actually copied into ``cobj`` (may be less
        than ``sizeof(cobj)`` on a short read; 0 at EOF).
    """
    size = ctypes.sizeof(cobj)
    data = fd.read(size)
    count = min(len(data), size)
    ctypes.memmove(ctypes.pointer(cobj), ctypes.c_char_p(data), count)
    return count
class InputVideoStream:
    """Decode a video file into individual BMP frames via an ffmpeg pipe.

    open() spawns ffmpeg writing one BMP image per frame to its stdout
    (``image2pipe`` muxer + ``bmp`` codec); readframe() is a generator
    that yields each frame as a raw BMP byte string.
    """
    def __init__(self, path=None):
        # Defaults only; `path` is currently ignored — call open() to start.
        self.rate = 15                # nominal frame rate (not passed to ffmpeg here)
        self.ivcodec = 'bmp'          # encode every frame as a standalone BMP
        self.filepath = 'test.mp4'
        self.frames = 10
        self.iformat = 'image2pipe'   # ffmpeg muxer that streams images over a pipe

    def open(self, path):
        # Spawn ffmpeg decoding `path` to a BMP stream on its stdout.
        self.filepath = path
        cmd = [
            FFMPEG_BIN,
            '-i', self.filepath,
            '-f', self.iformat,
            '-vcodec', self.ivcodec,
            '-'  # it means output to pipe
        ]
        self.p = sp.Popen(
            cmd,
            stdin=sp.PIPE,
            stdout=sp.PIPE,
            stderr=sp.PIPE,
        )

    def readframe(self):
        """post each frame as bmp image(iterator)

        Yields one complete BMP file (header + pixel data) per frame.
        Stops when the 'BM' magic is no longer found on the pipe.
        """
        while True:
            bmfheader = BitmapFileHeader()
            sread(self.p.stdout, bmfheader)
            if bmfheader.bfType != 0x4d42:  # the last frame is appended with some broken bytes
                break
            # reconvert to python string
            bmp = ctypes.string_at(ctypes.pointer(bmfheader), ctypes.sizeof(bmfheader))
            # BitmapFileHeader and rest data: bfSize is the whole file size,
            # so read everything that follows the header.
            bmp += self.p.stdout.read(bmfheader.bfSize - ctypes.sizeof(bmfheader))
            yield bmp
        self.p.stdin.close()
        del self.p
class OutVideoStream:
    """Encode a video by piping raw frames into ffmpeg's stdin.

    Typical use::

        with OutVideoStream() as ov:   # now binds the stream, not None
            ov.open('out.avi')
            ov.writeframe(raw_rgb_bytes)

    Fixes over the previous version:
      * ``__enter__`` returned ``None``, so ``with OutVideoStream() as ov:``
        bound ``None``; it now returns ``self`` (callers that ignored the
        value are unaffected).
      * ``__exit__`` duplicated the ``hasattr`` guard that ``close()``
        already performs; it now simply delegates to ``close()``.
    """
    def __init__(self, path=None):
        # Input (pipe) side: raw rgb24 frames of a fixed size.
        self.ipix_fmt = 'rgb24'
        self.iformat = 'rawvideo'
        self.filepath = 'test2.avi'
        if path: self.filepath = path
        self.isize = '352x240'        # WxH of the incoming raw frames
        # Output side.
        self.opix_fmt = 'bgr24'
        self.frames = 10
        self.rate = 25
        self.oformat = 'avi'  # 'mp4'
        self.ocodec = 'rawvideo'  # 'huffyuv'

    def open(self, path):
        """Spawn ffmpeg reading raw frames from stdin and writing `path`."""
        self.filepath = path
        cmd = [
            FFMPEG_BIN,
            '-y',                         # overwrite output without asking
            '-pix_fmt', self.ipix_fmt,
            '-f', self.iformat,
            '-s', self.isize,
            '-i', '-',                    # read input from stdin
            '-an',                        # no audio track
            '-vf', 'vflip',               # vertical flip (presumably frames arrive bottom-up — confirm)
            '-pix_fmt', self.opix_fmt,
            '-f', self.oformat,
            '-r', str(self.rate),
            '-vcodec', self.ocodec,
            self.filepath,
        ]
        self.p = sp.Popen(
            cmd,
            stdin=sp.PIPE,
            stdout=sp.PIPE,
            stderr=sp.PIPE,
        )

    def writeframe(self, frameraw):
        """Write one raw frame (bytes in `ipix_fmt` layout) to ffmpeg."""
        self.p.stdin.write(frameraw)

    def close(self):
        """Close ffmpeg's stdin so it can finalize the file; idempotent."""
        if hasattr(self, 'p'):
            self.p.stdin.close()
            del self.p

    def __enter__(self):
        # Return self so `with ... as ov:` works; previously returned None.
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # close() is already a no-op when no process was opened.
        self.close()
if __name__ == '__main__':
    # Demo / smoke test (Python 2 syntax throughout the script).
    # 1) Dump ffmpeg capability info.
    print 'version:', get_ffmpeg_version()
    print 'info:', get_ffmpeg_info()
    print 'codecs:', get_codecs()
    print 'formats:', get_formats()
    print 'pix_fmts:', get_pixel_formats()
    print 'info of video:', get_info('./test.mp4')
    from PIL import Image
    import BmpImagePlugin
    import cStringIO as StringIO
    # read a mp4 to output bmp files
    iv = InputVideoStream()
    iv.open('./test.mp4')
    pathformat = "%04d.bmp"
    for i, bmp in enumerate(iv.readframe()):
        path = pathformat % i
        image = Image.open(StringIO.StringIO(bmp))
        image.save(path)
    # NOTE(review): relies on `i` leaking out of the for-loop; raises
    # NameError if the video yielded no frames at all.
    frames = i + 1  # count = last + 1
    # write a avi within reading each frame from bmp file
    ov = OutVideoStream()
    with ov:
        ov.rate=30
        ov.open('test.avi')
        for i in range(frames):
            path = pathformat % i
            image = Image.open(path)
            ov.writeframe(image.tobytes())
    # ov.close() #in 'with' statements, auto-called
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Gamma distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution # pylint: disable=line-too-long
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util # pylint: disable=line-too-long
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
class Gamma(distribution.ContinuousDistribution):
  """The `Gamma` distribution with parameter alpha and beta.

  The parameters are the shape and inverse scale parameters alpha, beta.

  The PDF of this distribution is:

  ```pdf(x) = (beta^alpha)(x^(alpha-1))e^(-x*beta)/Gamma(alpha), x > 0```

  and the CDF of this distribution is:

  ```cdf(x) = GammaInc(alpha, beta * x) / Gamma(alpha), x > 0```

  where GammaInc is the incomplete lower Gamma function.

  Examples:

  ```python
  dist = Gamma(alpha=3.0, beta=2.0)
  dist2 = Gamma(alpha=[3.0, 4.0], beta=[2.0, 3.0])
  ```
  """

  def __init__(self, alpha, beta, name="Gamma"):
    """Construct Gamma distributions with parameters `alpha` and `beta`.

    The parameters `alpha` and `beta` must be shaped in a way that supports
    broadcasting (e.g. `alpha + beta` is a valid operation).

    Args:
      alpha: `float` or `double` tensor, the shape params of the
        distribution(s).
        alpha must contain only positive values.
      beta: `float` or `double` tensor, the inverse scale params of the
        distribution(s).
        beta must contain only positive values.
      name: The name to prepend to all ops created by this distribution.

    Raises:
      TypeError: if `alpha` and `beta` are different dtypes.
    """
    with ops.op_scope([alpha, beta], name) as scope:
      self._name = scope
      # Gate the identity ops on runtime positivity checks so invalid
      # parameters fail when the graph executes.
      with ops.control_dependencies([
          check_ops.assert_positive(alpha), check_ops.assert_positive(beta)]):
        alpha = array_ops.identity(alpha, name="alpha")
        beta = array_ops.identity(beta, name="beta")
        contrib_tensor_util.assert_same_float_dtype((alpha, beta))
        # alpha + beta realizes the broadcast; its shape is the batch shape.
        self._broadcast_tensor = alpha + beta
    self._get_batch_shape = self._broadcast_tensor.get_shape()
    # Scalar event shape: a single draw from one distribution is a scalar.
    self._get_event_shape = tensor_shape.TensorShape([])
    self._alpha = alpha
    self._beta = beta

  @property
  def name(self):
    """Name to prepend to all ops."""
    return self._name

  @property
  def dtype(self):
    """dtype of samples from this distribution."""
    return self._alpha.dtype

  @property
  def alpha(self):
    """Shape parameter."""
    return self._alpha

  @property
  def beta(self):
    """Inverse scale parameter."""
    return self._beta

  def batch_shape(self, name="batch_shape"):
    """Batch dimensions of this instance as a 1-D int32 `Tensor`.

    The product of the dimensions of the `batch_shape` is the number of
    independent distributions of this kind the instance represents.

    Args:
      name: name to give to the op

    Returns:
      `Tensor` `batch_shape`
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self._broadcast_tensor], name):
        return array_ops.shape(self._broadcast_tensor)

  def get_batch_shape(self):
    """`TensorShape` available at graph construction time.

    Same meaning as `batch_shape`. May be only partially defined.

    Returns:
      `TensorShape` object.
    """
    return self._get_batch_shape

  def event_shape(self, name="event_shape"):
    """Shape of a sample from a single distribution as a 1-D int32 `Tensor`.

    Args:
      name: name to give to the op

    Returns:
      `Tensor` `event_shape`
    """
    with ops.name_scope(self.name):
      with ops.op_scope([], name):
        # Empty shape tensor: events are scalars.
        return constant_op.constant([], dtype=dtypes.int32)

  def get_event_shape(self):
    """`TensorShape` available at graph construction time.

    Same meaning as `event_shape`. May be only partially defined.

    Returns:
      `TensorShape` object.
    """
    return self._get_event_shape

  def mean(self, name="mean"):
    """Mean of each batch member."""
    # E[X] = alpha / beta for a Gamma(alpha, beta) distribution.
    with ops.name_scope(self.name):
      with ops.op_scope([self._alpha, self._beta], name):
        return self._alpha / self._beta

  def mode(self, name="mode"):
    """Mode of each batch member. Defined only if alpha >= 1."""
    alpha = self._alpha
    beta = self._beta
    with ops.name_scope(self.name):
      with ops.op_scope([alpha, beta], name):
        alpha_ge_1 = alpha >= 1.0
        # Mode is (alpha - 1) / beta where defined; NaN elsewhere.
        mode_if_defined = (alpha - 1.0) / beta
        nan = np.nan * self._ones()
        return math_ops.select(alpha_ge_1, mode_if_defined, nan)

  def variance(self, name="variance"):
    """Variance of each batch member."""
    # Var[X] = alpha / beta^2.
    with ops.name_scope(self.name):
      with ops.op_scope([self._alpha, self._beta], name):
        return self._alpha / math_ops.square(self._beta)

  def std(self, name="std"):
    """Standard deviation of this distribution."""
    with ops.name_scope(self.name):
      with ops.op_scope([self._alpha, self._beta], name):
        return math_ops.sqrt(self._alpha) / self._beta

  def log_pdf(self, x, name="log_pdf"):
    """Log pdf of observations in `x` under these Gamma distribution(s).

    Args:
      x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.
      name: The name to give this op.

    Returns:
      log_pdf: tensor of dtype `dtype`, the log-PDFs of `x`.

    Raises:
      TypeError: if `x` and `alpha` are different dtypes.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self._alpha, self._beta, x], name):
        alpha = self._alpha
        beta = self._beta
        x = ops.convert_to_tensor(x)
        # The Gamma pdf has support x > 0 only; enforce it at runtime.
        x = control_flow_ops.with_dependencies(
            [check_ops.assert_positive(x)], x)
        contrib_tensor_util.assert_same_float_dtype(tensors=[x,],
                                                    dtype=self.dtype)
        # log pdf = alpha*log(beta) + (alpha-1)*log(x) - beta*x - lgamma(alpha)
        return (alpha * math_ops.log(beta) + (alpha - 1) * math_ops.log(x) -
                beta * x - math_ops.lgamma(self._alpha))

  def pdf(self, x, name="pdf"):
    """Pdf of observations in `x` under these Gamma distribution(s).

    Args:
      x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.
      name: The name to give this op.

    Returns:
      pdf: tensor of dtype `dtype`, the PDFs of `x`

    Raises:
      TypeError: if `x` and `alpha` are different dtypes.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([], name):
        return math_ops.exp(self.log_pdf(x))

  def log_cdf(self, x, name="log_cdf"):
    """Log CDF of observations `x` under these Gamma distribution(s).

    Args:
      x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.
      name: The name to give this op.

    Returns:
      log_cdf: tensor of dtype `dtype`, the log-CDFs of `x`.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self._alpha, self._beta, x], name):
        x = ops.convert_to_tensor(x)
        x = control_flow_ops.with_dependencies(
            [check_ops.assert_positive(x)], x)
        contrib_tensor_util.assert_same_float_dtype(tensors=[x,],
                                                    dtype=self.dtype)
        # Note that igamma returns the regularized incomplete gamma function,
        # which is what we want for the CDF.
        return math_ops.log(math_ops.igamma(self._alpha, self._beta * x))

  def cdf(self, x, name="cdf"):
    """CDF of observations `x` under these Gamma distribution(s).

    Args:
      x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.
      name: The name to give this op.

    Returns:
      cdf: tensor of dtype `dtype`, the CDFs of `x`.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self._alpha, self._beta, x], name):
        return math_ops.igamma(self._alpha, self._beta * x)

  def entropy(self, name="entropy"):
    """The entropy of Gamma distribution(s).

    This is defined to be

    ```
    entropy = alpha - log(beta) + log(Gamma(alpha))
    + (1-alpha)digamma(alpha)
    ```

    where digamma(alpha) is the digamma function.

    Args:
      name: The name to give this op.

    Returns:
      entropy: tensor of dtype `dtype`, the entropy.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self.alpha, self._beta], name):
        alpha = self._alpha
        beta = self._beta
        return (alpha - math_ops.log(beta) + math_ops.lgamma(alpha) +
                (1 - alpha) * math_ops.digamma(alpha))

  def sample(self, n, seed=None, name="sample"):
    """Draws `n` samples from the Gamma distribution(s).

    See the doc for tf.random_gamma for further detail.

    Args:
      n: Python integer, the number of observations to sample from each
        distribution.
      seed: Python integer, the random seed for this operation.
      name: Optional name for the operation.

    Returns:
      samples: a `Tensor` of shape `(n,) + self.batch_shape + self.event_shape`
          with values of type `self.dtype`.
    """
    with ops.op_scope([n, self.alpha, self._beta], self.name):
      return random_ops.random_gamma([n],
                                     self.alpha,
                                     beta=self._beta,
                                     dtype=self.dtype,
                                     seed=seed,
                                     name=name)

  @property
  def is_reparameterized(self):
    # Sampling goes through random_gamma, which does not expose a
    # reparameterized gradient path here.
    return False

  def _ones(self):
    # Ones broadcast to the batch shape (alpha + beta realizes the broadcast).
    return array_ops.ones_like(self._alpha + self._beta, dtype=self.dtype)
| |
from __future__ import unicode_literals
import decimal
import operator
import warnings
from wtforms import fields, widgets
from wtforms.compat import text_type, string_types
class ReferencePropertyField(fields.SelectFieldBase):
    """
    A field for ``db.ReferenceProperty``. The list items are rendered in a
    select.

    :param reference_class:
        A db.Model class which will be used to generate the default query
        to make the list of items. If this is not specified, The `query`
        property must be overridden before validation.
    :param get_label:
        If a string, use this attribute on the model class as the label
        associated with each option. If a one-argument callable, this callable
        will be passed model instance and expected to return the label text.
        Otherwise, the model object's `__str__` or `__unicode__` will be used.
    :param allow_blank:
        If set to true, a blank choice will be added to the top of the list
        to allow `None` to be chosen.
    :param blank_text:
        Use this to override the default blank option's label.
    """
    widget = widgets.Select()

    def __init__(self, label=None, validators=None, reference_class=None,
                 label_attr=None, get_label=None, allow_blank=False,
                 blank_text='', **kwargs):
        super(ReferencePropertyField, self).__init__(label, validators,
                                                     **kwargs)
        # label_attr is the deprecated string-only spelling of get_label.
        if label_attr is not None:
            warnings.warn('label_attr= will be removed in WTForms 1.1, use get_label= instead.', DeprecationWarning)
            self.get_label = operator.attrgetter(label_attr)
        elif get_label is None:
            self.get_label = lambda x: x
        elif isinstance(get_label, string_types):
            self.get_label = operator.attrgetter(get_label)
        else:
            self.get_label = get_label
        self.allow_blank = allow_blank
        self.blank_text = blank_text
        self._set_data(None)
        if reference_class is not None:
            self.query = reference_class.all()

    def _get_data(self):
        # Lazily resolve the submitted key against the query.
        if self._formdata is not None:
            for obj in self.query:
                if str(obj.key()) == self._formdata:
                    self._set_data(obj)
                    break
        return self._data

    def _set_data(self, data):
        self._data = data
        self._formdata = None

    data = property(_get_data, _set_data)

    def iter_choices(self):
        """Yield (value, label, selected) triples for the select widget."""
        if self.allow_blank:
            yield ('__None', self.blank_text, self.data is None)
        for obj in self.query:
            key = str(obj.key())
            label = self.get_label(obj)
            yield (key, label, (self.data.key() == obj.key()) if self.data else False)

    def process_formdata(self, valuelist):
        if valuelist:
            if valuelist[0] == '__None':
                self.data = None
            else:
                # Defer the query lookup to _get_data.
                self._data = None
                self._formdata = valuelist[0]

    def pre_validate(self, form):
        """Ensure the selected object is one of the query's choices.

        Fix: the previous guard (`if not self.allow_blank or self.data is
        not None:`) fell through with ``self.data is None`` on a required
        field with no/invalid selection and then dereferenced
        ``self.data.key()``, crashing with AttributeError instead of
        reporting a validation error. The structure now mirrors the
        sibling ``KeyPropertyField``.
        """
        if self.data is not None:
            for obj in self.query:
                if str(self.data.key()) == str(obj.key()):
                    break
            else:
                raise ValueError(self.gettext('Not a valid choice'))
        elif not self.allow_blank:
            raise ValueError(self.gettext('Not a valid choice'))
class KeyPropertyField(fields.SelectFieldBase):
    """
    A field for ``ndb.KeyProperty``. The list items are rendered in a select.

    :param reference_class:
        A db.Model class which will be used to generate the default query
        to make the list of items. If this is not specified, The `query`
        property must be overridden before validation.
    :param get_label:
        If a string, use this attribute on the model class as the label
        associated with each option. If a one-argument callable, this callable
        will be passed model instance and expected to return the label text.
        Otherwise, the model object's `__str__` or `__unicode__` will be used.
    :param allow_blank:
        If set to true, a blank choice will be added to the top of the list
        to allow `None` to be chosen.
    :param blank_text:
        Use this to override the default blank option's label.
    """
    widget = widgets.Select()

    def __init__(self, label=None, validators=None, reference_class=None,
                 label_attr=None, get_label=None, allow_blank=False,
                 blank_text=u'', **kwargs):
        super(KeyPropertyField, self).__init__(label, validators,
                                               **kwargs)
        if label_attr is not None:
            warnings.warn('label_attr= will be removed in WTForms 1.1, use get_label= instead.', DeprecationWarning)
            self.get_label = operator.attrgetter(label_attr)
        elif get_label is None:
            self.get_label = lambda x: x
        elif isinstance(get_label, string_types):
            # Fix: was `isinstance(get_label, basestring)` — `basestring`
            # does not exist on Python 3 (NameError). The module already
            # imports the portable `string_types` from wtforms.compat and
            # the sibling ReferencePropertyField uses it.
            self.get_label = operator.attrgetter(get_label)
        else:
            self.get_label = get_label
        self.allow_blank = allow_blank
        self.blank_text = blank_text
        self._set_data(None)
        if reference_class is not None:
            self.query = reference_class.query()

    def _get_data(self):
        # Lazily resolve the submitted key id against the query.
        if self._formdata is not None:
            for obj in self.query:
                if str(obj.key.id()) == self._formdata:
                    self._set_data(obj)
                    break
        return self._data

    def _set_data(self, data):
        self._data = data
        self._formdata = None

    data = property(_get_data, _set_data)

    def iter_choices(self):
        """Yield (value, label, selected) triples for the select widget."""
        if self.allow_blank:
            yield (u'__None', self.blank_text, self.data is None)
        for obj in self.query:
            key = str(obj.key.id())
            label = self.get_label(obj)
            yield (key, label, (self.data.key == obj.key) if self.data else False)

    def process_formdata(self, valuelist):
        if valuelist:
            if valuelist[0] == '__None':
                self.data = None
            else:
                # Defer the query lookup to _get_data.
                self._data = None
                self._formdata = valuelist[0]

    def pre_validate(self, form):
        """Ensure the selected object is one of the query's choices."""
        if self.data is not None:
            for obj in self.query:
                if self.data.key == obj.key:
                    break
            else:
                raise ValueError(self.gettext(u'Not a valid choice'))
        elif not self.allow_blank:
            raise ValueError(self.gettext(u'Not a valid choice'))
class StringListPropertyField(fields.TextAreaField):
    """
    A field for ``db.StringListProperty``. The list items are rendered in a
    textarea.
    """

    def _value(self):
        # Prefer the raw submitted text; otherwise re-join the stored lines.
        if self.raw_data:
            return self.raw_data[0]
        if not self.data:
            return ''
        return text_type("\n".join(self.data))

    def process_formdata(self, valuelist):
        if not valuelist:
            return
        try:
            self.data = valuelist[0].splitlines()
        except ValueError:
            raise ValueError(self.gettext('Not a valid list'))
class IntegerListPropertyField(fields.TextAreaField):
    """
    A field for ``db.ListProperty(int)``-style integer lists. The list items
    are rendered in a textarea, one integer per line.
    """

    def _value(self):
        if self.raw_data:
            return self.raw_data[0]
        else:
            # Fixes over the previous version:
            #  * items are ints, so they must be stringified before joining
            #    ("\n".join on ints raised TypeError);
            #  * `unicode` is Python-2-only; use the `text_type` already
            #    imported from wtforms.compat at the top of this module.
            return self.data and text_type("\n".join(str(value) for value in self.data)) or u''

    def process_formdata(self, valuelist):
        if valuelist:
            try:
                self.data = [int(value) for value in valuelist[0].splitlines()]
            except ValueError:
                raise ValueError(self.gettext(u'Not a valid integer list'))
class GeoPtPropertyField(fields.TextField):
    """Parses "lat, lon" text input into a normalized coordinate string."""

    def process_formdata(self, valuelist):
        if not valuelist:
            return
        try:
            # Exactly two comma-separated decimal components are required.
            lat, lon = valuelist[0].split(',')
            lat = decimal.Decimal(lat.strip())
            lon = decimal.Decimal(lon.strip())
        except (decimal.InvalidOperation, ValueError):
            raise ValueError('Not a valid coordinate location')
        self.data = '%s,%s' % (lat, lon)
| |
# -*- coding: utf-8 -*-
from itertools import groupby
import json
import os
# Constants
# Maps JSON-schema scalar type names to their C# counterparts. Nullable
# value types are used so fields absent from the JSON deserialize to null.
C_SHARP_TYPES = {
    "integer": "int?",
    "string": "string",
    "boolean": "bool?",
    "number": "uint?",
}
# Here JResponses push responses represented by a struct in C#
# (definition name -> C# return type string).
STRUCT_RESPONSES = dict()
# Special responses that should be stored in specially created classes
SPECIAL_RESPONSES = dict()
# Enums keys: schema definition names that are plain enums; these are
# rendered as strings (or int?) rather than generated classes.
PSEUDO_ENUMS = list()
# Example: pep_eight -> PepEight
def to_camel_case(string):
    """Convert a snake_case identifier to CamelCase.

    Only the first character of each underscore-separated part is
    upper-cased; the rest of the part is preserved as-is (unlike
    str.capitalize(), which would lower-case it). Empty parts produced
    by doubled, leading or trailing underscores are skipped — the
    previous implementation indexed ``tmp[0]`` unconditionally and
    raised IndexError on such input.
    """
    return "".join(part[0].upper() + part[1:]
                   for part in string.split('_') if part)
# Loads JSON object from a file
def read_file(filename):
    """Parse *filename* as UTF-8 JSON and return the decoded object."""
    with open(filename, 'rt', encoding='utf8') as handle:
        return json.load(handle)
# Generates class instances.
def generate_instances():
    """Prompt for class names and print a C# accessor property for each."""
    template = (
        "/// <summary>\n"
        "/// {0} API section.\n"
        "/// </summary>\n"
        "public {0} {0}\n{{\n get {{ return new {0}(this); }}\n}}\n\n"
    )
    names = input("Enter classnames separated by semicolons:\n").split(";")
    for name in names:
        print(template.format(name))
# Generates classes from JSON
class JObjects:
    """Generates C# type classes (VkLib.Core/Types/<Category>/<Name>.cs)
    from the "definitions" section of a VK API JSON schema file.

    Side effects: appends enum definition keys to the module-level
    PSEUDO_ENUMS list, which the ref-resolution helpers consult.
    """

    def __init__(self, file, save=False, output=False):
        # file   -- path to the JSON schema (e.g. objects.json)
        # save   -- write each generated class to disk
        # output -- echo the generated source to stdout
        print("Reading file {}...".format(file))
        j_object = read_file(filename=file)
        print("Parsing methods...")
        definitions = j_object["definitions"]
        errors = 0
        # Find enums and form their list (consulted later when resolving refs)
        for key in definitions:
            prop = definitions[key]
            enum = prop.get("enum", None)
            if enum:
                PSEUDO_ENUMS.append(key)
        # Iterate and generate
        for value in definitions:
            # Form file structure
            content = self.__parse_object(definitions[value], value)
            # Check for NULL (enums and failed parses are skipped)
            if content is None:
                errors += 1
                continue
            # Split data: first token is the folder/category, the rest
            # form the class name.
            item_data = list(value.split("_"))
            # Get vital params
            folder_name = item_data.pop(0).capitalize()
            for index, item in enumerate(item_data):
                item_data[index] = item.capitalize()
            class_name = str().join(item_data)
            # Generate output
            if output:
                print(content)
            # Generate files
            if save:
                print("Saving...")
                filename = "VkLib.Core/Types/{}/{}.cs".format(folder_name, class_name)
                os.makedirs(os.path.dirname(filename), exist_ok=True)
                with open(filename, "w+") as file:
                    file.write(content)
                print("Saved to {}!".format(filename))
        print("Total objects viewed: {}".format(len(definitions)))
        print("Errors: {}".format(errors))
        print("Parsed: {}".format(len(definitions) - errors))

    @classmethod
    def __parse_object(cls, item, key):
        """Return C# source for one definition, or None when it cannot be
        generated (pure enums are skipped; failures are logged)."""
        # Get sub values
        properties = item.get("properties", None)
        enum = item.get("enum", None)
        all_of = item.get("allOf", None)
        # Check and parse
        try:
            # Get inner props
            if properties:
                out = cls.__write_title(str(), key, "")
                out += cls.__parse_properties(properties, key)
                out += " }\n}\n"
                return out
            elif all_of:
                out = cls.__parse_children(all_of, key)
                return out
            elif enum:
                # Enums are emitted as plain strings elsewhere, no class.
                return None
            else:
                raise KeyError("Unable to resolve this method.")
        except Exception:
            # Write exception and treat this definition as failed.
            from traceback import print_exc
            print_exc()
            return None

    @classmethod
    def __parse_children(cls, all_of, key):
        """Handle `allOf` composition: entry 0 holds the parent $ref,
        entry 1 holds the child's own properties.

        NOTE(review): implicitly returns None when entry 0 has no $ref.
        """
        print(all_of)
        # Get and write name of a parent
        first = all_of[0]
        ref = first.get("$ref", None)
        if ref:
            # Prepare Title (base-class suffix for the generated class)
            ls = ref.split("/")[2].split("_")
            category = ls.pop(0).capitalize()
            classname = to_camel_case("_".join(ls))
            title = cls.__write_title(str(), key, " : VkLib.Types.{}.{}".format(category, classname))
            # Parse properties
            properties = all_of[1]["properties"]
            output = cls.__parse_properties(properties, key)
            # Return content
            return title + output + " }\n}\n"

    @classmethod
    def __parse_property(cls, data, name, key):
        """Render one JSON property as a documented C# auto-property."""
        # Get data
        data_type = data.get("type", None)
        ref = data.get("$ref", None)
        array = data.get("allOf", None)
        # Check type
        if data_type:
            # Fix tuples: a list of alternative types degrades to string.
            if isinstance(data_type, list):
                data_type = "string"
            # Generate data in correct form
            if data_type == "array":
                data_type = data["items"].get("$ref", None)
                if not data_type:
                    data_type = C_SHARP_TYPES[data["items"]["type"]]
                else:
                    data_type = cls.__type_from_ref(data_type)
                data_type = "IEnumerable<{}>".format(data_type)
            elif data_type == "object":
                data_type = "object"
            else:
                data_type = C_SHARP_TYPES[data_type]
        # Class type
        elif ref:
            data_type = cls.__type_from_ref(ref)
            if "base_bool_int" in ref or "base_ok_response" in ref:
                data_type = "int?"
        # Array of unspecified composition
        elif array:
            data_type = "IEnumerable<{}>".format("object")
        # Camel
        camel_name = to_camel_case(name)
        # C# forbids a member named like its enclosing type: append "_"
        # when the property name would collide with the class name.
        ls = key.split("_")
        ls.pop(0)
        if len(ls) == 1 and ls[0] == name:
            camel_name += "_"
        elif ls == name.split("_"):
            camel_name += "_"
        # Write text
        var = " /// <summary>\n" \
              " /// {0}\n" \
              " /// </summary>\n" \
              " [JsonProperty(\"{3}\")]\n" \
              " public {1} {2} {{ get; set; }}\n\n" \
            .format(
                data.get("description", "Property"),
                data_type,
                camel_name,
                name
            )
        return var

    @classmethod
    def __parse_properties(cls, properties, key):
        """Concatenate the rendered C# for every property in the mapping."""
        output = str()
        for name in properties:
            output += cls.__parse_property(properties[name], name, key)
        return output

    @classmethod
    def __write_title(cls, string, item, parent):
        """Append the file header: usings, namespace and class opening.
        `parent` is either "" or a " : Base" inheritance suffix."""
        # Split to list
        ls = list(item.split('_'))
        for i, v in enumerate(ls):
            ls[i] = v.capitalize()
        category = ls.pop(0)
        title = "".join(ls)
        # Write text
        string += "using System;\n" \
                  "using Newtonsoft.Json;\n" \
                  "using System.Collections.Generic;\n" \
                  "\n" \
                  "namespace VkLib.Types.{} \n{{\n".format(category)
        string += " public class {}{}\n" \
                  " {{\n".format(title, parent)
        return string

    @staticmethod
    def __type_from_ref(string):
        """Resolve a "$ref" path to a C# type name. Pseudo-enums map to
        plain strings, except the bool-int/ok-response markers -> int?."""
        # Form camel casic string
        s = string.split("/")[2]
        if s in PSEUDO_ENUMS:
            if s == "base_bool_int" or s == "base_ok_response":
                return "int?"
            else:
                return "string"
        else:
            ls = list(s.split('_'))
            category = ls.pop(0).capitalize()
            classname = to_camel_case("_".join(ls))
            return "VkLib.Types.{}.{}".format(category, classname)
# Generates Methods from JSON
class JMethods:
    """Generates C# API method wrappers (VkLib.Core/Methods/<Section>.cs)
    from the "methods" section of a VK API JSON schema file.

    Return types are resolved through the module-level STRUCT_RESPONSES /
    PSEUDO_ENUMS tables, so JResponses/JObjects should run first.
    """

    def __init__(self, file, save=False, output=False):
        # Read file
        print("Reading file {}...".format(file))
        j_object = read_file(filename=file)
        # Get objects
        print("Parsing methods...")
        methods = j_object["methods"]
        errors = 0  # NOTE(review): never incremented — summary always reports 0
        # Group items by methods sections ("users.get" -> "Users").
        # NOTE(review): itertools.groupby only groups *consecutive* items,
        # so this assumes the schema lists methods sorted by section.
        grouping = {
            k: list(v) for k, v in groupby(
                methods,
                key=lambda x: x["name"].split(".")[0].capitalize()
            )
        }
        # Iterate through grouping: one generated C# class per section.
        for method_group in grouping:
            values = grouping[method_group]
            heading = """using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace VkLib.Methods
{{
/// <summary>
/// {0} API section.
/// </summary>
public class {0}
{{
private Vkontakte _vkontakte;
internal {0}(Vkontakte vkontakte)
{{
_vkontakte = vkontakte;
}}
""".format(method_group)
            mid = str()
            # Parse methods
            # NOTE(review): __parse_method returns None on failure, which
            # would make this += raise TypeError.
            for item in values:
                mid += self.__parse_method(item)
            ending = "\n }\n}\n"
            content = heading + mid + ending
            filename = "VkLib.Core/Methods/{}.cs".format(method_group)
            # Generate path (created even when save=False)
            os.makedirs(os.path.dirname(filename), exist_ok=True)
            # Save to file
            if save:
                with open(filename, "w+") as file:
                    file.write(content)
            # Output to console
            if output:
                print(content)
        # Log results
        print("Total methods parsed: {}".format(len(methods)))
        print("Errors: {}".format(errors))
        print("Parsed: {}".format(len(methods) - errors))

    @classmethod
    def __parse_method(cls, item):
        """Assemble one full C# method: docs, signature, parameter
        dictionary and the query call. Returns None on failure."""
        try:
            output = ""
            output = cls.__write_docs(output, item)
            output = cls.__prepare_name(output, item)
            output = cls.__write_variables(output, item)
            output = cls.__create_dictionary(output, item)
            output = cls.__write_query(output, item)
            return output
        except Exception:
            from traceback import print_exc
            print_exc()
            return None

    @classmethod
    def __write_docs(cls, string, item):
        """Append the XML doc comment: summary, docs link, <param> tags."""
        # Exclude words-helpers: C# reserved words get a trailing "_".
        name = item.get("name", "")
        if name == "long" or name == "out" or name == "object":
            name = "{}_".format(name)
        # Concat
        string += "\n" \
                  " /// <summary>\n" \
                  " /// {0}\n" \
                  " /// Docs: <see href=\"https://vk.com/dev/{1}\">{1}</see>\n" \
                  " /// </summary>\n" \
            .format(
                item.get("description", ""),
                name
            )
        # Print parameters
        variables = item.get("parameters")
        if variables:
            for index, value in enumerate(variables):
                string += " /// <param name=\"{}\">{}</param>\n" \
                    .format(
                        value["name"],
                        value["description"] if "description" in value else ""
                    )
        return string

    @classmethod
    def __prepare_name(cls, string, item):
        """Append the async method signature opening (return type + name)."""
        # Form method name: "users.get" -> "Get"
        name = item["name"].split(".")[1]
        name_list = list(name)
        name_list[0] = name_list[0].upper()
        name = str().join(name_list)
        # Generate path (C# return type)
        path = cls.__generate_response_path(item)
        # Combine to path
        string += " public async Task<{}> {}(".format(path, name)
        return string

    @staticmethod
    def __generate_response_path(item):
        """Map the method's response $ref to a C# return type via the
        STRUCT_RESPONSES / PSEUDO_ENUMS tables filled by JResponses."""
        response_type = item["responses"]["response"]["$ref"].split("/")[2]
        if response_type in STRUCT_RESPONSES:
            path = STRUCT_RESPONSES[response_type]
        elif response_type in PSEUDO_ENUMS:
            path = "string"
        else:
            # Fall back to a conventionally named response class.
            ls = response_type.split("_")
            category = ls.pop(0)
            response_name = to_camel_case("_".join(ls))
            path = "VkLib.Responses.{}.{}".format(category.capitalize(), response_name)
        return path

    @classmethod
    def __write_variables(cls, string, item):
        """Append the C# parameter list (every parameter optional,
        defaulting to null) and close the signature / open the body."""
        variables = item.get("parameters", None)
        if variables:
            for index, value in enumerate(variables):
                json_type = value["type"]
                if json_type == "array":
                    json_type = "IEnumerable<{}>".format(
                        C_SHARP_TYPES[value["items"]["type"]]
                    )
                else:
                    json_type = C_SHARP_TYPES[json_type]
                # Exclude words-helpers: C# reserved words get "_" appended.
                name = value["name"]
                if name == "long" or name == "out" or name == "object":
                    name = "{}_".format(name)
                # Concat; the last parameter also closes the signature.
                string += "{} {} = null{}".format(
                    json_type,
                    name,
                    ", " if len(variables) - 1 > index else ")\n {\n"
                )
        else:
            string += ")\n {\n"
        return string

    @classmethod
    def __create_dictionary(cls, string, item):
        """Append code packing each non-null argument into the request
        dictionary (arrays comma-joined, non-strings ToString'd)."""
        string += " Dictionary<string, string> parameters = new Dictionary<string, string>();\n\n"
        variables = item.get("parameters", None)
        if variables:
            for index, value in enumerate(variables):
                # Exclude words-helpers (must match __write_variables)
                name = value["name"]
                if name == "long" or name == "out" or name == "object":
                    name = "{}_".format(name)
                # Write the string conversion expression
                json_type = value["type"]
                if json_type == "array":
                    converted = "string.Join(\",\", {})".format(name)
                elif json_type == "string":
                    converted = name
                else:
                    converted = "{}.ToString()".format(name)
                # Concat
                string += " if ({0} != null)\n" \
                          " parameters.Add(\"{0}\", {1});\n"\
                    .format(
                        name,
                        converted
                    )
        return string

    @classmethod
    def __write_query(cls, string, item):
        """Append the awaited GetAsync call and close the method body."""
        # Extract response types
        path = cls.__generate_response_path(item)
        # Concat
        string += "\n return await _vkontakte.GetAsync<{}>(\"{}\", parameters);\n" \
            .format(path, item["name"]) + " }\n"
        return string
# Generates response classes from JSON
class JResponses:
    """Fills the module-level STRUCT_RESPONSES table with a C# return type
    for every response definition, then generates classes for the
    "special" composite responses not covered by the table."""

    def __init__(self, file, save=False, output=False):
        print("Reading file {}...".format(file))
        j_response = read_file(filename=file)
        print("Parsing methods...")
        definitions = j_response["definitions"]
        errors = 0  # NOTE(review): never incremented — summary always reports 0
        # Parse regular responses (populates STRUCT_RESPONSES)
        for key in definitions:
            self.__parse_response(definitions[key], key)
        # Parse special responses (anything STRUCT_RESPONSES did not cover)
        for key in definitions:
            # Get content
            content = self.__parse_special_response(definitions[key], key)
            # Is this real? (None means the table already covers it)
            if content:
                # Get variables
                ls = key.split("_")
                foldername = ls.pop(0).capitalize()
                classname = to_camel_case("_".join(ls))
                # Generate path (directory is created even when save=False)
                filename = "VkLib.Core/Responses/{}/{}.cs".format(foldername, classname)
                os.makedirs(os.path.dirname(filename), exist_ok=True)
                # Output
                if output:
                    print(content)
                # Save to file
                if save:
                    with open(filename, "w+") as file:
                        file.write(content)
        # Log
        print("Total responses viewed: {}".format(len(definitions)))
        print("Errors: {}".format(errors))
        print("Parsed: {}".format(len(definitions) - errors))

    @classmethod
    def __parse_special_response(cls, item, name):
        """Generate a response class for a definition not already covered
        by STRUCT_RESPONSES; returns the C# source, or None if covered."""
        if not STRUCT_RESPONSES.get(name, None):
            properties = item["properties"]["response"]["properties"]
            ls = name.split("_")
            category = ls.pop(0).capitalize()
            classname = to_camel_case("_".join(ls))
            # Write header
            string = "using System;\n" \
                     "using System.Collections.Generic;\n" \
                     "using Newtonsoft.Json;\n" \
                     "using VkLib.Types;\n" \
                     "\n" \
                     "namespace VkLib.Responses.{}\n" \
                     "{{\n" \
                     " public class {}\n" \
                     " {{\n".format(category, classname)
            # Iterate through properties
            for key in properties:
                value = properties[key]
                # Parse types
                _type = value.get("type", None)
                _ref = value.get("$ref", None)
                if _type:
                    if _type == "array":
                        # Determine element type from the "items" entry
                        _ref = value["items"].get("$ref")
                        _type = value["items"].get("type")
                        if _ref:
                            # From path
                            ls = _ref.split("/")[2].split("_")
                            category = ls.pop(0).capitalize()
                            full_path = to_camel_case("_".join(ls))
                            inner_type = "VkLib.Types.{}.{}".format(category, full_path)
                        else:
                            # Enumerable (nested arrays assumed to hold ints)
                            if _type == "array":
                                inner_type = "IEnumerable<{}>".format(C_SHARP_TYPES["integer"])
                            else:
                                inner_type = C_SHARP_TYPES[_type]
                        # Format ready type
                        realtype = "IEnumerable<{}>".format(inner_type)
                    else:
                        # Get original type
                        realtype = C_SHARP_TYPES[_type]
                elif _ref:
                    if "base_bool_int" in _ref or "base_ok_response" in _ref:
                        realtype = "int"
                    else:
                        ls = _ref.split("/")[2].split("_")
                        category = ls.pop(0).capitalize()
                        full_path = to_camel_case("_".join(ls))
                        realtype = "VkLib.Types.{}.{}".format(category, full_path)
                description = value.get("description", None)
                # Docs
                # NOTE(review): if a property has neither "type" nor "$ref",
                # `realtype` from the previous iteration leaks into this one
                # (or raises NameError on the very first property).
                string += " /// <summary>\n" \
                          " /// {}\n" \
                          " /// </summary>\n" \
                          " [JsonProperty(\"{}\")]\n" \
                          " public {} {} {{ get; set; }}\n\n"\
                    .format(
                        description,
                        key,
                        realtype,
                        to_camel_case(key)
                    )
            # Write ending
            string += " }\n" \
                      "}\n"
            return string

    @classmethod
    def __parse_response(cls, item, name):
        """Record the C# return type for one response definition in
        STRUCT_RESPONSES (plain-object responses are deliberately left
        unregistered and handled later by __parse_special_response)."""
        # Extract response
        response = item["properties"]["response"]
        # Check fields existance
        ref = response.get("$ref", None)
        items = response.get("items", None)
        properties = response.get("properties", None)
        pattern_properties = response.get("patternProperties", None)
        if ref:
            # Work with clean and good responses: a direct type reference
            if "base_ok_response" in ref or "base_bool_int" in ref:
                STRUCT_RESPONSES[name] = "int"
            else:
                ls = ref.split("/")[2].split("_")
                _type = ls.pop(0).capitalize()
                _name = to_camel_case("_".join(ls))
                STRUCT_RESPONSES[name] = "VkLib.Types.{}.{}".format(_type, _name)
        elif items:
            # Generate ItemsResponses: the response is a bare array
            ref = items.get("$ref", None)
            _type = items.get("type", None)
            if ref:
                ls = ref.split("/")[2].split("_")
                _type = ls.pop(0).capitalize()
                _name = to_camel_case("_".join(ls))
                STRUCT_RESPONSES[name] = "IEnumerable<VkLib.Types.{}.{}>".format(_type, _name)
            elif _type:
                sharp_type = C_SHARP_TYPES.get(_type)
                if sharp_type:
                    STRUCT_RESPONSES[name] = "IEnumerable<{}>".format(sharp_type)
                else:
                    STRUCT_RESPONSES[name] = "IEnumerable<IEnumerable<string>>"
            else:
                STRUCT_RESPONSES[name] = "IEnumerable<object>"
        elif properties:
            # Check for ApiItemsResponse (count + items envelope)
            if properties.get("items", None):
                items = properties["items"]
                ref = items["items"].get("$ref", None)
                if ref:
                    ls = ref.split("/")[2].split("_")
                    category = ls.pop(0).capitalize()
                    _name = to_camel_case("_".join(ls))
                    STRUCT_RESPONSES[name] = "ApiItemsResponse<VkLib.Types.{}.{}>".format(
                        category,
                        _name
                    )
                else:
                    _type = items["items"].get("type", None)
                    if _type == "integer":
                        STRUCT_RESPONSES[name] = "ApiItemsResponse<{}>".format(
                            C_SHARP_TYPES["integer"]
                        )
                    else:
                        STRUCT_RESPONSES[name] = "ApiItemsResponse<object>"
            else:
                # Plain object response: left for __parse_special_response.
                pass
        elif pattern_properties:
            # Append unknown stuff (really, what the hell is this?) as object
            STRUCT_RESPONSES[name] = "object"
        else:
            # Append response to a response list: a bare scalar type
            STRUCT_RESPONSES[name] = C_SHARP_TYPES[response["type"]]
# Main actions
if __name__ == "__main__":
    # Parse every schema file; suppress console output and file saving.
    common = dict(output=False, save=False)
    j_objects = JObjects('VkLib.Schema/objects.json', **common)
    j_responses = JResponses('VkLib.Schema/responses.json', **common)
    j_methods = JMethods('VkLib.Schema/methods.json', **common)
| |
"""SCons.SConsign
Writing and reading information to the .sconsign file or files.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/SConsign.py 4369 2009/09/19 15:58:29 scons"
import cPickle
import os
import os.path
import SCons.dblite
import SCons.Warnings
def corrupt_dblite_warning(filename):
    """Warn that a corrupt .sconsign file is being skipped."""
    msg = "Ignoring corrupt .sconsign file: %s" % (filename,)
    SCons.Warnings.warn(SCons.Warnings.CorruptSConsignWarning, msg)
# Wire dblite's corruption handling: skip corrupt database files with a
# warning rather than aborting the build.
SCons.dblite.ignore_corrupt_dbfiles = 1
SCons.dblite.corruption_warning = corrupt_dblite_warning
#XXX Get rid of the global array so this becomes re-entrant.
# Every Base instance registers itself here so the module-level write()
# can flush them all at the end of the build.
sig_files = []
# Info for the database SConsign implementation (now the default):
# "DataBase" is a dictionary that maps top-level SConstruct directories
# to open database handles.
# "DB_Module" is the Python database module to create the handles.
# "DB_Name" is the base name of the database file (minus any
# extension the underlying DB module will add).
DataBase = {}
DB_Module = SCons.dblite
DB_Name = ".sconsign"
# Open DB handles that still need a sync() at the end of the build.
DB_sync_list = []
def Get_DataBase(dir):
    """Return an (open database handle, mode) pair for directory *dir*.

    Handles are cached in the module-level DataBase dict.  When the
    database name is relative and Repositories are in use, the first
    directory (the local top) is opened read-write ("c"); Repository
    copies are opened read-only ("r") so we never write into them.
    Python 2 module (print statement below).
    """
    global DataBase, DB_Module, DB_Name
    top = dir.fs.Top
    if not os.path.isabs(DB_Name) and top.repositories:
        # First candidate (the local tree) gets read-write access.
        mode = "c"
        for d in [top] + top.repositories:
            if dir.is_under(d):
                try:
                    # Cached handle for this directory tree.
                    return DataBase[d], mode
                except KeyError:
                    path = d.entry_abspath(DB_Name)
                    try: db = DataBase[d] = DB_Module.open(path, mode)
                    except (IOError, OSError): pass
                    else:
                        if mode != "r":
                            # Only writable databases need syncing later.
                            DB_sync_list.append(db)
                        return db, mode
            # Everything after the local top is a Repository: read-only.
            mode = "r"
    try:
        # Absolute DB_Name (or no repositories): single shared handle.
        return DataBase[top], "c"
    except KeyError:
        db = DataBase[top] = DB_Module.open(DB_Name, "c")
        DB_sync_list.append(db)
        return db, "c"
    except TypeError:
        # NOTE(review): diagnostic path -- dump state before re-raising.
        print "DataBase =", DataBase
        raise
def Reset():
    """Restore the module-level bookkeeping to a pristine state.

    Unit tests that drive SConsign repeatedly call this between runs
    to start each test with a clean slate.
    """
    global sig_files, DB_sync_list
    sig_files, DB_sync_list = [], []
# Local alias: database keys are case-normalized paths.
normcase = os.path.normcase
def write():
    """Flush every registered signature file, then sync open databases."""
    global sig_files
    for entry_file in sig_files:
        # sync=0: defer database syncing to the single pass below.
        entry_file.write(sync=0)
    for handle in DB_sync_list:
        try:
            do_sync = handle.sync
        except AttributeError:
            # Not all anydbm modules have sync() methods.
            continue
        do_sync()
class SConsignEntry:
    """
    Wrapper class for the generic entry in a .sconsign file.
    The Node subclass populates it with attributes as it pleases.

    XXX As coded below, we do expect a '.binfo' attribute to be added,
    but we'll probably generalize this in the next refactorings.
    """
    current_version_id = 1
    def __init__(self):
        # Create an object attribute from the class attribute so it ends up
        # in the pickled data in the .sconsign file.  (Fixed: the original
        # assigned to a bare local `_version_id`, so the attribute was never
        # created on the instance and never got pickled.)
        self._version_id = self.current_version_id
    def convert_to_sconsign(self):
        # Prepare the attached build info for pickling.
        self.binfo.convert_to_sconsign()
    def convert_from_sconsign(self, dir, name):
        # Fix up the attached build info after unpickling.
        self.binfo.convert_from_sconsign(dir, name)
class Base:
    """
    Controlling class for the signature entries belonging to a single
    directory.  A storage-specific subclass maintains the actual
    directory association; this base class supplies the shared
    bookkeeping for fetching, storing and merging individual entries.
    """
    def __init__(self):
        self.entries = {}
        self.dirty = False
        self.to_be_merged = {}

    def get_entry(self, filename):
        """Return the stored entry for filename (KeyError if absent)."""
        return self.entries[filename]

    def set_entry(self, filename, obj):
        """Record obj as the entry for filename and mark us dirty."""
        self.entries[filename] = obj
        self.dirty = True

    def do_not_set_entry(self, filename, obj):
        """No-op stand-in for set_entry on read-only directories."""
        pass

    def store_info(self, filename, node):
        """Queue node's build info for merging into filename's entry."""
        entry = node.get_stored_info()
        entry.binfo.merge(node.get_binfo())
        self.to_be_merged[filename] = node
        self.dirty = True

    def do_not_store_info(self, filename, node):
        """No-op stand-in for store_info on read-only directories."""
        pass

    def merge(self):
        """Fold every queued node's info into self.entries."""
        for filename, node in self.to_be_merged.items():
            entry = node.get_stored_info()
            try:
                node_info = entry.ninfo
            except AttributeError:
                # This happens with SConf Nodes, because the configuration
                # subsystem takes direct control over how the build decision
                # is made and its information stored.
                pass
            else:
                node_info.merge(node.get_ninfo())
            self.entries[filename] = entry
        self.to_be_merged = {}
class DB(Base):
    """
    A Base subclass that reads and writes signature information
    from a global .sconsign.db* file--the actual file suffix is
    determined by the database module.
    """
    def __init__(self, dir):
        # dir -- the directory node whose signatures this object manages.
        Base.__init__(self)
        self.dir = dir
        db, mode = Get_DataBase(dir)
        # Read using the path relative to the top of the Repository
        # (self.dir.tpath) from which we're fetching the signature
        # information.
        path = normcase(dir.tpath)
        try:
            rawentries = db[path]
        except KeyError:
            # No stored signatures for this directory yet; keep the empty
            # entries dict from Base.__init__().
            pass
        else:
            try:
                self.entries = cPickle.loads(rawentries)
                if type(self.entries) is not type({}):
                    # Unexpected pickled payload: reset, then raise so the
                    # generic handler below issues the corruption warning.
                    self.entries = {}
                    raise TypeError
            except KeyboardInterrupt:
                raise
            except Exception, e:
                SCons.Warnings.warn(SCons.Warnings.CorruptSConsignWarning,
                                    "Ignoring corrupt sconsign entry : %s (%s)\n"%(self.dir.tpath, e))
            for key, entry in self.entries.items():
                entry.convert_from_sconsign(dir, key)
        if mode == "r":
            # This directory is actually under a repository, which means
            # likely they're reaching in directly for a dependency on
            # a file there. Don't actually set any entry info, so we
            # won't try to write to that .sconsign.dblite file.
            self.set_entry = self.do_not_set_entry
            self.store_info = self.do_not_store_info
        global sig_files
        sig_files.append(self)
    def write(self, sync=1):
        # Flush merged entries back to the database.  With sync=0 the
        # actual database sync is deferred to the module-level write().
        if not self.dirty:
            return
        self.merge()
        db, mode = Get_DataBase(self.dir)
        # Write using the path relative to the top of the SConstruct
        # directory (self.dir.path), not relative to the top of
        # the Repository; we only write to our own .sconsign file,
        # not to .sconsign files in Repositories.
        path = normcase(self.dir.path)
        for key, entry in self.entries.items():
            entry.convert_to_sconsign()
        db[path] = cPickle.dumps(self.entries, 1)
        if sync:
            try:
                syncmethod = db.sync
            except AttributeError:
                # Not all anydbm modules have sync() methods.
                pass
            else:
                syncmethod()
class Dir(Base):
    def __init__(self, fp=None, dir=None):
        """
        fp - file pointer to read entries from
        dir - if given, convert loaded entries for this directory
        """
        Base.__init__(self)
        if not fp:
            return
        self.entries = cPickle.load(fp)
        if type(self.entries) is not dict:
            # Corrupt payload: leave a usable empty table behind, then
            # raise so the caller can report the corruption.
            self.entries = {}
            raise TypeError
        if dir:
            for name, entry in self.entries.items():
                entry.convert_from_sconsign(dir, name)
class DirFile(Dir):
    """
    Encapsulates reading and writing a per-directory .sconsign file.
    """
    def __init__(self, dir):
        """
        dir - the directory for the file
        """
        self.dir = dir
        self.sconsign = os.path.join(dir.path, '.sconsign')
        try:
            fp = open(self.sconsign, 'rb')
        except IOError:
            # A missing file is normal on a first build; start empty.
            fp = None
        try:
            Dir.__init__(self, fp, dir)
        except KeyboardInterrupt:
            raise
        except:
            SCons.Warnings.warn(SCons.Warnings.CorruptSConsignWarning,
                                "Ignoring corrupt .sconsign file: %s"%self.sconsign)
        global sig_files
        sig_files.append(self)
    def write(self, sync=1):
        """
        Write the .sconsign file to disk.
        Try to write to a temporary file first, and rename it if we
        succeed.  If we can't write to the temporary file, it's
        probably because the directory isn't writable (and if so,
        how did we build anything in this directory, anyway?), so
        try to write directly to the .sconsign file as a backup.
        If we can't rename, try to copy the temporary contents back
        to the .sconsign file.  Either way, always try to remove
        the temporary file at the end.
        """
        if not self.dirty:
            return
        self.merge()
        # Process-unique temporary name in the same directory so the
        # eventual rename stays on one filesystem.
        temp = os.path.join(self.dir.path, '.scons%d' % os.getpid())
        try:
            file = open(temp, 'wb')
            fname = temp
        except IOError:
            try:
                file = open(self.sconsign, 'wb')
                fname = self.sconsign
            except IOError:
                # Nowhere writable: silently give up (historical behavior).
                return
        for key, entry in self.entries.items():
            entry.convert_to_sconsign()
        cPickle.dump(self.entries, file, 1)
        file.close()
        if fname != self.sconsign:
            try:
                # Remember the old permissions so they can be restored.
                mode = os.stat(self.sconsign)[0]
                os.chmod(self.sconsign, 0666)
                os.unlink(self.sconsign)
            except (IOError, OSError):
                # Try to carry on in the face of either OSError
                # (things like permission issues) or IOError (disk
                # or network issues). If there's a really dangerous
                # issue, it should get re-raised by the calls below.
                pass
            try:
                os.rename(fname, self.sconsign)
            except OSError:
                # An OSError failure to rename may indicate something
                # like the directory has no write permission, but
                # the .sconsign file itself might still be writable,
                # so try writing on top of it directly. An IOError
                # here, or in any of the following calls, would get
                # raised, indicating something like a potentially
                # serious disk or network issue.
                open(self.sconsign, 'wb').write(open(fname, 'rb').read())
                # NOTE(review): if os.stat() above raised, 'mode' is
                # unbound here and this chmod raises NameError -- confirm.
                os.chmod(self.sconsign, mode)
        try:
            os.unlink(temp)
        except (IOError, OSError):
            pass
# Default per-directory signature implementation; File() below may switch
# this to DirFile for one .sconsign file per directory.
ForDirectory = DB
def File(name, dbm_module=None):
    """
    Arrange for all signatures to be stored in a global .sconsign.db*
    file.

    name - base name of the global database file; passing None reverts
           to one .sconsign file per directory (DirFile).
    dbm_module - optional database module to use instead of the current
                 DB_Module default.
    """
    global ForDirectory, DB_Name, DB_Module
    if name is None:
        ForDirectory = DirFile
        DB_Module = None
    else:
        ForDirectory = DB
        DB_Name = name
        # Idiomatic identity test (was: "if not dbm_module is None").
        if dbm_module is not None:
            DB_Module = dbm_module
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| |
import numpy as np
import pytest
from pandas import CategoricalIndex, DataFrame, Index, MultiIndex, Series, crosstab
import pandas._testing as tm
class TestCrosstab:
    """Behavioral tests for pandas.crosstab: basic tabulation, margins,
    normalization, NaN/dropna handling, categorical and tuple-named
    inputs."""
    def setup_method(self, method):
        # Small mixed frame, doubled below so every (A, B, C) combination
        # occurs an even number of times.
        df = DataFrame(
            {
                "A": [
                    "foo",
                    "foo",
                    "foo",
                    "foo",
                    "bar",
                    "bar",
                    "bar",
                    "bar",
                    "foo",
                    "foo",
                    "foo",
                ],
                "B": [
                    "one",
                    "one",
                    "one",
                    "two",
                    "one",
                    "one",
                    "one",
                    "two",
                    "two",
                    "two",
                    "one",
                ],
                "C": [
                    "dull",
                    "dull",
                    "shiny",
                    "dull",
                    "dull",
                    "shiny",
                    "shiny",
                    "dull",
                    "shiny",
                    "shiny",
                    "shiny",
                ],
                "D": np.random.randn(11),
                "E": np.random.randn(11),
                "F": np.random.randn(11),
            }
        )
        self.df = df.append(df, ignore_index=True)
    def test_crosstab_single(self):
        # crosstab of two Series matches groupby().size().unstack()
        df = self.df
        result = crosstab(df["A"], df["C"])
        expected = df.groupby(["A", "C"]).size().unstack()
        tm.assert_frame_equal(result, expected.fillna(0).astype(np.int64))
    def test_crosstab_multiple(self):
        # Lists of Series on either axis produce MultiIndex rows/columns
        df = self.df
        result = crosstab(df["A"], [df["B"], df["C"]])
        expected = df.groupby(["A", "B", "C"]).size()
        expected = expected.unstack("B").unstack("C").fillna(0).astype(np.int64)
        tm.assert_frame_equal(result, expected)
        result = crosstab([df["B"], df["C"]], df["A"])
        expected = df.groupby(["B", "C", "A"]).size()
        expected = expected.unstack("A").fillna(0).astype(np.int64)
        tm.assert_frame_equal(result, expected)
    def test_crosstab_ndarray(self):
        # Raw ndarrays with explicit rownames/colnames behave like Series
        a = np.random.randint(0, 5, size=100)
        b = np.random.randint(0, 3, size=100)
        c = np.random.randint(0, 10, size=100)
        df = DataFrame({"a": a, "b": b, "c": c})
        result = crosstab(a, [b, c], rownames=["a"], colnames=("b", "c"))
        expected = crosstab(df["a"], [df["b"], df["c"]])
        tm.assert_frame_equal(result, expected)
        result = crosstab([b, c], a, colnames=["a"], rownames=("b", "c"))
        expected = crosstab([df["b"], df["c"]], df["a"])
        tm.assert_frame_equal(result, expected)
        # assign arbitrary names
        result = crosstab(self.df["A"].values, self.df["C"].values)
        assert result.index.name == "row_0"
        assert result.columns.name == "col_0"
    def test_crosstab_non_aligned(self):
        # GH 17005
        a = Series([0, 1, 1], index=["a", "b", "c"])
        b = Series([3, 4, 3, 4, 3], index=["a", "b", "c", "d", "f"])
        c = np.array([3, 4, 3])
        expected = DataFrame(
            [[1, 0], [1, 1]],
            index=Index([0, 1], name="row_0"),
            columns=Index([3, 4], name="col_0"),
        )
        result = crosstab(a, b)
        tm.assert_frame_equal(result, expected)
        result = crosstab(a, c)
        tm.assert_frame_equal(result, expected)
    def test_crosstab_margins(self):
        # margins=True adds an "All" row and column of totals
        a = np.random.randint(0, 7, size=100)
        b = np.random.randint(0, 3, size=100)
        c = np.random.randint(0, 5, size=100)
        df = DataFrame({"a": a, "b": b, "c": c})
        result = crosstab(a, [b, c], rownames=["a"], colnames=("b", "c"), margins=True)
        assert result.index.names == ("a",)
        assert result.columns.names == ["b", "c"]
        all_cols = result["All", ""]
        exp_cols = df.groupby(["a"]).size().astype("i8")
        # to keep index.name
        exp_margin = Series([len(df)], index=Index(["All"], name="a"))
        exp_cols = exp_cols.append(exp_margin)
        exp_cols.name = ("All", "")
        tm.assert_series_equal(all_cols, exp_cols)
        all_rows = result.loc["All"]
        exp_rows = df.groupby(["b", "c"]).size().astype("i8")
        exp_rows = exp_rows.append(Series([len(df)], index=[("All", "")]))
        exp_rows.name = "All"
        exp_rows = exp_rows.reindex(all_rows.index)
        exp_rows = exp_rows.fillna(0).astype(np.int64)
        tm.assert_series_equal(all_rows, exp_rows)
    def test_crosstab_margins_set_margin_name(self):
        # GH 15972
        a = np.random.randint(0, 7, size=100)
        b = np.random.randint(0, 3, size=100)
        c = np.random.randint(0, 5, size=100)
        df = DataFrame({"a": a, "b": b, "c": c})
        result = crosstab(
            a,
            [b, c],
            rownames=["a"],
            colnames=("b", "c"),
            margins=True,
            margins_name="TOTAL",
        )
        assert result.index.names == ("a",)
        assert result.columns.names == ["b", "c"]
        all_cols = result["TOTAL", ""]
        exp_cols = df.groupby(["a"]).size().astype("i8")
        # to keep index.name
        exp_margin = Series([len(df)], index=Index(["TOTAL"], name="a"))
        exp_cols = exp_cols.append(exp_margin)
        exp_cols.name = ("TOTAL", "")
        tm.assert_series_equal(all_cols, exp_cols)
        all_rows = result.loc["TOTAL"]
        exp_rows = df.groupby(["b", "c"]).size().astype("i8")
        exp_rows = exp_rows.append(Series([len(df)], index=[("TOTAL", "")]))
        exp_rows.name = "TOTAL"
        exp_rows = exp_rows.reindex(all_rows.index)
        exp_rows = exp_rows.fillna(0).astype(np.int64)
        tm.assert_series_equal(all_rows, exp_rows)
        # Non-string margins_name must raise
        msg = "margins_name argument must be a string"
        for margins_name in [666, None, ["a", "b"]]:
            with pytest.raises(ValueError, match=msg):
                crosstab(
                    a,
                    [b, c],
                    rownames=["a"],
                    colnames=("b", "c"),
                    margins=True,
                    margins_name=margins_name,
                )
    def test_crosstab_pass_values(self):
        # values + aggfunc is equivalent to a pivot_table aggregation
        a = np.random.randint(0, 7, size=100)
        b = np.random.randint(0, 3, size=100)
        c = np.random.randint(0, 5, size=100)
        values = np.random.randn(100)
        table = crosstab(
            [a, b], c, values, aggfunc=np.sum, rownames=["foo", "bar"], colnames=["baz"]
        )
        df = DataFrame({"foo": a, "bar": b, "baz": c, "values": values})
        expected = df.pivot_table(
            "values", index=["foo", "bar"], columns="baz", aggfunc=np.sum
        )
        tm.assert_frame_equal(table, expected)
    def test_crosstab_dropna(self):
        # GH 3820
        a = np.array(["foo", "foo", "foo", "bar", "bar", "foo", "foo"], dtype=object)
        b = np.array(["one", "one", "two", "one", "two", "two", "two"], dtype=object)
        c = np.array(
            ["dull", "dull", "dull", "dull", "dull", "shiny", "shiny"], dtype=object
        )
        res = crosstab(a, [b, c], rownames=["a"], colnames=["b", "c"], dropna=False)
        m = MultiIndex.from_tuples(
            [("one", "dull"), ("one", "shiny"), ("two", "dull"), ("two", "shiny")],
            names=["b", "c"],
        )
        tm.assert_index_equal(res.columns, m)
    def test_crosstab_no_overlap(self):
        # GS 10291
        s1 = Series([1, 2, 3], index=[1, 2, 3])
        s2 = Series([4, 5, 6], index=[4, 5, 6])
        actual = crosstab(s1, s2)
        expected = DataFrame()
        tm.assert_frame_equal(actual, expected)
    def test_margin_dropna(self):
        # GH 12577
        # pivot_table counts null into margin ('All')
        # when margins=true and dropna=true
        df = DataFrame({"a": [1, 2, 2, 2, 2, np.nan], "b": [3, 3, 4, 4, 4, 4]})
        actual = crosstab(df.a, df.b, margins=True, dropna=True)
        expected = DataFrame([[1, 0, 1], [1, 3, 4], [2, 3, 5]])
        expected.index = Index([1.0, 2.0, "All"], name="a")
        expected.columns = Index([3, 4, "All"], name="b")
        tm.assert_frame_equal(actual, expected)
        df = DataFrame(
            {"a": [1, np.nan, np.nan, np.nan, 2, np.nan], "b": [3, np.nan, 4, 4, 4, 4]}
        )
        actual = crosstab(df.a, df.b, margins=True, dropna=True)
        expected = DataFrame([[1, 0, 1], [0, 1, 1], [1, 1, 2]])
        expected.index = Index([1.0, 2.0, "All"], name="a")
        expected.columns = Index([3.0, 4.0, "All"], name="b")
        tm.assert_frame_equal(actual, expected)
        df = DataFrame(
            {"a": [1, np.nan, np.nan, np.nan, np.nan, 2], "b": [3, 3, 4, 4, 4, 4]}
        )
        actual = crosstab(df.a, df.b, margins=True, dropna=True)
        expected = DataFrame([[1, 0, 1], [0, 1, 1], [1, 1, 2]])
        expected.index = Index([1.0, 2.0, "All"], name="a")
        expected.columns = Index([3, 4, "All"], name="b")
        tm.assert_frame_equal(actual, expected)
        # GH 12642
        # _add_margins raises KeyError: Level None not found
        # when margins=True and dropna=False
        df = DataFrame({"a": [1, 2, 2, 2, 2, np.nan], "b": [3, 3, 4, 4, 4, 4]})
        actual = crosstab(df.a, df.b, margins=True, dropna=False)
        expected = DataFrame([[1, 0, 1], [1, 3, 4], [2, 4, 6]])
        expected.index = Index([1.0, 2.0, "All"], name="a")
        expected.columns = Index([3, 4, "All"], name="b")
        tm.assert_frame_equal(actual, expected)
        df = DataFrame(
            {"a": [1, np.nan, np.nan, np.nan, 2, np.nan], "b": [3, np.nan, 4, 4, 4, 4]}
        )
        actual = crosstab(df.a, df.b, margins=True, dropna=False)
        expected = DataFrame([[1, 0, 1], [0, 1, 1], [1, 4, 6]])
        expected.index = Index([1.0, 2.0, "All"], name="a")
        expected.columns = Index([3.0, 4.0, "All"], name="b")
        tm.assert_frame_equal(actual, expected)
        a = np.array(["foo", "foo", "foo", "bar", "bar", "foo", "foo"], dtype=object)
        b = np.array(["one", "one", "two", "one", "two", np.nan, "two"], dtype=object)
        c = np.array(
            ["dull", "dull", "dull", "dull", "dull", "shiny", "shiny"], dtype=object
        )
        actual = crosstab(
            a, [b, c], rownames=["a"], colnames=["b", "c"], margins=True, dropna=False
        )
        m = MultiIndex.from_arrays(
            [
                ["one", "one", "two", "two", "All"],
                ["dull", "shiny", "dull", "shiny", ""],
            ],
            names=["b", "c"],
        )
        expected = DataFrame(
            [[1, 0, 1, 0, 2], [2, 0, 1, 1, 5], [3, 0, 2, 1, 7]], columns=m
        )
        expected.index = Index(["bar", "foo", "All"], name="a")
        tm.assert_frame_equal(actual, expected)
        actual = crosstab(
            [a, b], c, rownames=["a", "b"], colnames=["c"], margins=True, dropna=False
        )
        m = MultiIndex.from_arrays(
            [["bar", "bar", "foo", "foo", "All"], ["one", "two", "one", "two", ""]],
            names=["a", "b"],
        )
        expected = DataFrame(
            [[1, 0, 1], [1, 0, 1], [2, 0, 2], [1, 1, 2], [5, 2, 7]], index=m
        )
        expected.columns = Index(["dull", "shiny", "All"], name="c")
        tm.assert_frame_equal(actual, expected)
        actual = crosstab(
            [a, b], c, rownames=["a", "b"], colnames=["c"], margins=True, dropna=True
        )
        m = MultiIndex.from_arrays(
            [["bar", "bar", "foo", "foo", "All"], ["one", "two", "one", "two", ""]],
            names=["a", "b"],
        )
        expected = DataFrame(
            [[1, 0, 1], [1, 0, 1], [2, 0, 2], [1, 1, 2], [5, 1, 6]], index=m
        )
        expected.columns = Index(["dull", "shiny", "All"], name="c")
        tm.assert_frame_equal(actual, expected)
    def test_crosstab_normalize(self):
        # Issue 12578
        df = DataFrame(
            {"a": [1, 2, 2, 2, 2], "b": [3, 3, 4, 4, 4], "c": [1, 1, np.nan, 1, 1]}
        )
        rindex = Index([1, 2], name="a")
        cindex = Index([3, 4], name="b")
        full_normal = DataFrame([[0.2, 0], [0.2, 0.6]], index=rindex, columns=cindex)
        row_normal = DataFrame([[1.0, 0], [0.25, 0.75]], index=rindex, columns=cindex)
        col_normal = DataFrame([[0.5, 0], [0.5, 1.0]], index=rindex, columns=cindex)
        # Check all normalize args
        tm.assert_frame_equal(crosstab(df.a, df.b, normalize="all"), full_normal)
        tm.assert_frame_equal(crosstab(df.a, df.b, normalize=True), full_normal)
        tm.assert_frame_equal(crosstab(df.a, df.b, normalize="index"), row_normal)
        tm.assert_frame_equal(crosstab(df.a, df.b, normalize="columns"), col_normal)
        tm.assert_frame_equal(
            crosstab(df.a, df.b, normalize=1),
            crosstab(df.a, df.b, normalize="columns"),
        )
        tm.assert_frame_equal(
            crosstab(df.a, df.b, normalize=0), crosstab(df.a, df.b, normalize="index"),
        )
        row_normal_margins = DataFrame(
            [[1.0, 0], [0.25, 0.75], [0.4, 0.6]],
            index=Index([1, 2, "All"], name="a", dtype="object"),
            columns=Index([3, 4], name="b", dtype="object"),
        )
        col_normal_margins = DataFrame(
            [[0.5, 0, 0.2], [0.5, 1.0, 0.8]],
            index=Index([1, 2], name="a", dtype="object"),
            columns=Index([3, 4, "All"], name="b", dtype="object"),
        )
        all_normal_margins = DataFrame(
            [[0.2, 0, 0.2], [0.2, 0.6, 0.8], [0.4, 0.6, 1]],
            index=Index([1, 2, "All"], name="a", dtype="object"),
            columns=Index([3, 4, "All"], name="b", dtype="object"),
        )
        tm.assert_frame_equal(
            crosstab(df.a, df.b, normalize="index", margins=True), row_normal_margins
        )
        tm.assert_frame_equal(
            crosstab(df.a, df.b, normalize="columns", margins=True), col_normal_margins,
        )
        tm.assert_frame_equal(
            crosstab(df.a, df.b, normalize=True, margins=True), all_normal_margins
        )
        # Test arrays
        crosstab(
            [np.array([1, 1, 2, 2]), np.array([1, 2, 1, 2])], np.array([1, 2, 1, 2])
        )
        # Test with aggfunc
        norm_counts = DataFrame(
            [[0.25, 0, 0.25], [0.25, 0.5, 0.75], [0.5, 0.5, 1]],
            index=Index([1, 2, "All"], name="a", dtype="object"),
            columns=Index([3, 4, "All"], name="b"),
        )
        test_case = crosstab(
            df.a, df.b, df.c, aggfunc="count", normalize="all", margins=True
        )
        tm.assert_frame_equal(test_case, norm_counts)
        df = DataFrame(
            {"a": [1, 2, 2, 2, 2], "b": [3, 3, 4, 4, 4], "c": [0, 4, np.nan, 3, 3]}
        )
        norm_sum = DataFrame(
            [[0, 0, 0.0], [0.4, 0.6, 1], [0.4, 0.6, 1]],
            index=Index([1, 2, "All"], name="a", dtype="object"),
            columns=Index([3, 4, "All"], name="b", dtype="object"),
        )
        test_case = crosstab(
            df.a, df.b, df.c, aggfunc=np.sum, normalize="all", margins=True
        )
        tm.assert_frame_equal(test_case, norm_sum)
    def test_crosstab_with_empties(self):
        # Check handling of empties
        df = DataFrame(
            {
                "a": [1, 2, 2, 2, 2],
                "b": [3, 3, 4, 4, 4],
                "c": [np.nan, np.nan, np.nan, np.nan, np.nan],
            }
        )
        empty = DataFrame(
            [[0.0, 0.0], [0.0, 0.0]],
            index=Index([1, 2], name="a", dtype="int64"),
            columns=Index([3, 4], name="b"),
        )
        for i in [True, "index", "columns"]:
            calculated = crosstab(df.a, df.b, values=df.c, aggfunc="count", normalize=i)
            tm.assert_frame_equal(empty, calculated)
        nans = DataFrame(
            [[0.0, np.nan], [0.0, 0.0]],
            index=Index([1, 2], name="a", dtype="int64"),
            columns=Index([3, 4], name="b"),
        )
        calculated = crosstab(df.a, df.b, values=df.c, aggfunc="count", normalize=False)
        tm.assert_frame_equal(nans, calculated)
    def test_crosstab_errors(self):
        # Issue 12578
        df = DataFrame(
            {"a": [1, 2, 2, 2, 2], "b": [3, 3, 4, 4, 4], "c": [1, 1, np.nan, 1, 1]}
        )
        error = "values cannot be used without an aggfunc."
        with pytest.raises(ValueError, match=error):
            crosstab(df.a, df.b, values=df.c)
        error = "aggfunc cannot be used without values"
        with pytest.raises(ValueError, match=error):
            crosstab(df.a, df.b, aggfunc=np.mean)
        error = "Not a valid normalize argument"
        with pytest.raises(ValueError, match=error):
            crosstab(df.a, df.b, normalize="42")
        with pytest.raises(ValueError, match=error):
            crosstab(df.a, df.b, normalize=42)
        error = "Not a valid margins argument"
        with pytest.raises(ValueError, match=error):
            crosstab(df.a, df.b, normalize="all", margins=42)
    def test_crosstab_with_categorial_columns(self):
        # GH 8860
        df = DataFrame(
            {
                "MAKE": ["Honda", "Acura", "Tesla", "Honda", "Honda", "Acura"],
                "MODEL": ["Sedan", "Sedan", "Electric", "Pickup", "Sedan", "Sedan"],
            }
        )
        categories = ["Sedan", "Electric", "Pickup"]
        df["MODEL"] = df["MODEL"].astype("category").cat.set_categories(categories)
        result = crosstab(df["MAKE"], df["MODEL"])
        expected_index = Index(["Acura", "Honda", "Tesla"], name="MAKE")
        expected_columns = CategoricalIndex(
            categories, categories=categories, ordered=False, name="MODEL"
        )
        expected_data = [[2, 0, 0], [2, 0, 1], [0, 1, 0]]
        expected = DataFrame(
            expected_data, index=expected_index, columns=expected_columns
        )
        tm.assert_frame_equal(result, expected)
    def test_crosstab_with_numpy_size(self):
        # GH 4003
        df = DataFrame(
            {
                "A": ["one", "one", "two", "three"] * 6,
                "B": ["A", "B", "C"] * 8,
                "C": ["foo", "foo", "foo", "bar", "bar", "bar"] * 4,
                "D": np.random.randn(24),
                "E": np.random.randn(24),
            }
        )
        result = crosstab(
            index=[df["A"], df["B"]],
            columns=[df["C"]],
            margins=True,
            aggfunc=np.size,
            values=df["D"],
        )
        expected_index = MultiIndex(
            levels=[["All", "one", "three", "two"], ["", "A", "B", "C"]],
            codes=[[1, 1, 1, 2, 2, 2, 3, 3, 3, 0], [1, 2, 3, 1, 2, 3, 1, 2, 3, 0]],
            names=["A", "B"],
        )
        expected_column = Index(["bar", "foo", "All"], dtype="object", name="C")
        expected_data = np.array(
            [
                [2.0, 2.0, 4.0],
                [2.0, 2.0, 4.0],
                [2.0, 2.0, 4.0],
                [2.0, np.nan, 2.0],
                [np.nan, 2.0, 2.0],
                [2.0, np.nan, 2.0],
                [np.nan, 2.0, 2.0],
                [2.0, np.nan, 2.0],
                [np.nan, 2.0, 2.0],
                [12.0, 12.0, 24.0],
            ]
        )
        expected = DataFrame(
            expected_data, index=expected_index, columns=expected_column
        )
        tm.assert_frame_equal(result, expected)
    def test_crosstab_dup_index_names(self):
        # GH 13279
        s = Series(range(3), name="foo")
        result = crosstab(s, s)
        expected_index = Index(range(3), name="foo")
        expected = DataFrame(
            np.eye(3, dtype=np.int64), index=expected_index, columns=expected_index
        )
        tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize("names", [["a", ("b", "c")], [("a", "b"), "c"]])
    def test_crosstab_tuple_name(self, names):
        # Tuple-valued Series names survive as index/column names
        s1 = Series(range(3), name=names[0])
        s2 = Series(range(1, 4), name=names[1])
        mi = MultiIndex.from_arrays([range(3), range(1, 4)], names=names)
        expected = Series(1, index=mi).unstack(1, fill_value=0)
        result = crosstab(s1, s2)
        tm.assert_frame_equal(result, expected)
    def test_crosstab_both_tuple_names(self):
        # GH 18321
        s1 = Series(range(3), name=("a", "b"))
        s2 = Series(range(3), name=("c", "d"))
        expected = DataFrame(
            np.eye(3, dtype="int64"),
            index=Index(range(3), name=("a", "b")),
            columns=Index(range(3), name=("c", "d")),
        )
        result = crosstab(s1, s2)
        tm.assert_frame_equal(result, expected)
    def test_crosstab_unsorted_order(self):
        # Result labels are sorted even when the input index is not
        df = DataFrame({"b": [3, 1, 2], "a": [5, 4, 6]}, index=["C", "A", "B"])
        result = crosstab(df.index, [df.b, df.a])
        e_idx = Index(["A", "B", "C"], name="row_0")
        e_columns = MultiIndex.from_tuples([(1, 4), (2, 6), (3, 5)], names=["b", "a"])
        expected = DataFrame(
            [[1, 0, 0], [0, 1, 0], [0, 0, 1]], index=e_idx, columns=e_columns
        )
        tm.assert_frame_equal(result, expected)
    def test_crosstab_normalize_multiple_columns(self):
        # GH 15150
        df = DataFrame(
            {
                "A": ["one", "one", "two", "three"] * 6,
                "B": ["A", "B", "C"] * 8,
                "C": ["foo", "foo", "foo", "bar", "bar", "bar"] * 4,
                "D": [0] * 24,
                "E": [0] * 24,
            }
        )
        result = crosstab(
            [df.A, df.B],
            df.C,
            values=df.D,
            aggfunc=np.sum,
            normalize=True,
            margins=True,
        )
        expected = DataFrame(
            np.array([0] * 29 + [1], dtype=float).reshape(10, 3),
            columns=Index(["bar", "foo", "All"], dtype="object", name="C"),
            index=MultiIndex.from_tuples(
                [
                    ("one", "A"),
                    ("one", "B"),
                    ("one", "C"),
                    ("three", "A"),
                    ("three", "B"),
                    ("three", "C"),
                    ("two", "A"),
                    ("two", "B"),
                    ("two", "C"),
                    ("All", ""),
                ],
                names=["A", "B"],
            ),
        )
        tm.assert_frame_equal(result, expected)
    def test_margin_normalize(self):
        # GH 27500
        df = DataFrame(
            {
                "A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
                "B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
                "C": [
                    "small",
                    "large",
                    "large",
                    "small",
                    "small",
                    "large",
                    "small",
                    "small",
                    "large",
                ],
                "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
                "E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
            }
        )
        # normalize on index
        result = crosstab(
            [df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=0
        )
        expected = DataFrame(
            [[0.5, 0.5], [0.5, 0.5], [0.666667, 0.333333], [0, 1], [0.444444, 0.555556]]
        )
        expected.index = MultiIndex(
            levels=[["Sub-Total", "bar", "foo"], ["", "one", "two"]],
            codes=[[1, 1, 2, 2, 0], [1, 2, 1, 2, 0]],
            names=["A", "B"],
        )
        expected.columns = Index(["large", "small"], dtype="object", name="C")
        tm.assert_frame_equal(result, expected)
        # normalize on columns
        result = crosstab(
            [df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=1
        )
        expected = DataFrame(
            [
                [0.25, 0.2, 0.222222],
                [0.25, 0.2, 0.222222],
                [0.5, 0.2, 0.333333],
                [0, 0.4, 0.222222],
            ]
        )
        expected.columns = Index(
            ["large", "small", "Sub-Total"], dtype="object", name="C"
        )
        expected.index = MultiIndex(
            levels=[["bar", "foo"], ["one", "two"]],
            codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
            names=["A", "B"],
        )
        tm.assert_frame_equal(result, expected)
        # normalize on both index and column
        result = crosstab(
            [df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=True
        )
        expected = DataFrame(
            [
                [0.111111, 0.111111, 0.222222],
                [0.111111, 0.111111, 0.222222],
                [0.222222, 0.111111, 0.333333],
                [0.000000, 0.222222, 0.222222],
                [0.444444, 0.555555, 1],
            ]
        )
        expected.columns = Index(
            ["large", "small", "Sub-Total"], dtype="object", name="C"
        )
        expected.index = MultiIndex(
            levels=[["Sub-Total", "bar", "foo"], ["", "one", "two"]],
            codes=[[1, 1, 2, 2, 0], [1, 2, 1, 2, 0]],
            names=["A", "B"],
        )
        tm.assert_frame_equal(result, expected)
| |
# -*- coding: utf-8 -*-
"""
Human Resource Management
"""
# web2py controller globals: the running controller/function names
module = request.controller
resourcename = request.function
# Abort early when the HRM module is disabled in deployment settings
if not settings.has_module(module):
    raise HTTP(404, body="Module disabled: %s" % module)
# Populate session/request variables shared by all HRM controllers
s3db.hrm_vars()
# =============================================================================
def index():
    """ Module Home Page """

    if session.s3.hrm.mode is not None:
        # Personal-profile mode: send the user to their own record
        redirect(URL(f="person"))
    else:
        # Skip the home page, go straight to the searchable Staff list
        redirect(URL(f="staff", args="summary"))
# =============================================================================
# People
# =============================================================================
def human_resource():
    """
        HR Controller
        - combined Staff/Volunteers
          Used for Summary view, Imports and S3AddPersonWidget2
    """

    controller = s3db.hrm_human_resource_controller
    return controller()
# -----------------------------------------------------------------------------
def staff():
    """ Staff Controller

        REST controller for hrm_human_resource filtered to Staff (type 1).
        prep() adapts list_fields/CRUD strings per request (incl. the
        "expiring contracts" report and XLS export); postp() adds action
        buttons and map popups.
    """
    # Staff only
    s3.filter = FS("type") == 1
    # CRUD pre-process: per-request resource configuration
    def prep(r):
        table = r.table
        tablename = r.tablename
        get_vars = r.get_vars
        # Use CRUD strings for staff
        crud_strings = s3.crud_strings
        crud_strings[tablename] = crud_strings["hrm_staff"]
        resource = r.resource
        if "expiring" in get_vars:
            # Filter for staff with contracts expiring in the next 4 weeks
            query = FS("end_date") < \
                    (request.utcnow + datetime.timedelta(weeks=4))
            resource.add_filter(query)
            # Adapt CRUD strings
            crud_strings[tablename].title_list = \
                T("Staff with Contracts Expiring in the next Month")
            # Reconfigure
            resource.configure(# Sort by Expiry
                               sortby = table.end_date,
                               # Remove the Add button
                               insertable=False
                               )
            # Adapt list_fields
            list_fields = [(T("Contract End Date"), "end_date"),
                           "person_id",
                           "job_title_id",
                           "organisation_id",
                           "department_id",
                           "site_id",
                           #"site_contact",
                           ]
        else:
            # Adapt list_fields
            list_fields = ["person_id",
                           "job_title_id",
                           "organisation_id",
                           "department_id",
                           "site_id",
                           #"site_contact",
                           (T("Email"), "email.value"),
                           (settings.get_ui_label_mobile_phone(), "phone.value"),
                           ]
            if settings.get_hrm_use_trainings():
                list_fields.append("person_id$training.course_id")
            if settings.get_hrm_use_certificates():
                list_fields.append("person_id$certification.certificate_id")
            list_fields.append((T("Contract End Date"), "end_date"))
            list_fields.append("status")
        resource.configure(list_fields = list_fields)
        if r.interactive:
            if r.id:
                if r.method not in ("profile", "delete"):
                    # Redirect to person controller
                    vars = {
                        "human_resource.id": r.id,
                        "group": "staff"
                    }
                    redirect(URL(f="person", vars=vars))
            else:
                if r.method == "import":
                    # Redirect to person controller
                    redirect(URL(f="person",
                                 args="import",
                                 vars={"group": "staff"}))
                elif not r.component and r.method != "delete":
                    # Configure site_id
                    field = table.site_id
                    site_id = get_vars.get("site_id", None)
                    if site_id:
                        # Site pre-selected by the caller: fix & lock it
                        field.default = site_id
                        field.writable = False
                    field.comment = DIV(DIV(_class="tooltip",
                                            _title="%s|%s" % (
                                            settings.get_org_site_label(),
                                            T("The facility where this position is based."),
                                            #messages.AUTOCOMPLETE_HELP,
                                            )))
                    #field.comment = S3AddResourceLink(c="org", f="facility",
                    #                                  vars = dict(child="site_id",
                    #                                              parent="req"),
                    #                                  title=T("Add New Site"),
                    #                                  )
                    # Hide status field
                    table.status.writable = table.status.readable = False
                    # Assume staff only between 16-81
                    s3db.pr_person.date_of_birth.widget = S3DateWidget(past=972,
                                                                       future=-192)
        elif r.representation == "xls":
            # Make it match Import sheets
            list_fields = s3db.get_config(tablename, "list_fields")
            # Remove "id" as XLS exporter doesn't like this not being first & has complicated skipping routines
            try:
                list_fields.remove("id")
            except ValueError:
                pass
            # Separate Facility Type from Facility Name
            table.site_id.represent = s3db.org_SiteRepresent(show_type = False)
            i = 0
            for f in list_fields:
                i += 1
                if f == "site_id":
                    break
            # Insert the Facility Type column right after site_id
            list_fields.insert(i,
                               (T("Facility Type"),
                                "person_id$human_resource.site_id$instance_type"))
            # Split person_id into first/middle/last
            try:
                list_fields.remove("person_id")
            except ValueError:
                pass
            list_fields = ["person_id$first_name",
                           "person_id$middle_name",
                           "person_id$last_name",
                           ] + list_fields
            s3db.configure(tablename,
                           list_fields = list_fields)
        return True
    s3.prep = prep
    # CRUD post-process: UI decoration after the request has been handled
    def postp(r, output):
        if r.interactive:
            if not r.component:
                # Set the minimum end_date to the same as the start_date
                s3.jquery_ready.append(
'''S3.start_end_date('hrm_human_resource_start_date','hrm_human_resource_end_date')''')
                s3_action_buttons(r, deletable=settings.get_hrm_deletable())
                if "msg" in settings.modules and \
                   auth.permission.has_permission("update", c="hrm", f="compose"):
                    # @ToDo: Remove this now that we have it in Events?
                    s3.actions.append(
                        {"url": URL(f="compose",
                                    vars = {"human_resource.id": "[id]"}),
                         "_class": "action-btn send",
                         "label": str(T("Send Message"))
                        })
                #s3.scripts.append("/%s/static/scripts/jquery.doubleScroll.js" % appname)
                #s3.jquery_ready.append('''$('.dataTable_table').doubleScroll()''')
                #s3.jquery_ready.append('''$('.dataTables_wrapper').doubleScroll()''')
        elif r.representation == "plain":
            # Map Popups
            output = s3db.hrm_map_popup(r)
        return output
    s3.postp = postp
    return s3_rest_controller("hrm", "human_resource")
# -----------------------------------------------------------------------------
def person():
    """
        Person Controller
        - used for access to component Tabs, Personal Profile & Imports
        - includes components relevant to HRM

        Thin wrapper delegating to the shared controller in s3db.
    """
    return s3db.hrm_person_controller()
# -----------------------------------------------------------------------------
def profile():
    """
        Profile Controller
        - includes components relevant to HRM

        Forces the record to the logged-in person and configures the
        person/HR tables for read-mostly "personal profile" usage.
    """
    # Force the REST request onto the logged-in person's record
    request.args = [str(s3_logged_in_person())]
    # Custom Method for Contacts
    s3db.set_method("pr", resourcename,
                    method="contacts",
                    action=s3db.pr_contacts)
    if settings.has_module("asset"):
        # Assets as component of people
        s3db.add_components("pr_person", asset_asset="assigned_to_id")
    group = get_vars.get("group", "staff")
    # Configure human resource table
    tablename = "hrm_human_resource"
    table = s3db[tablename]
    table.type.default = 1
    # Configure person table
    tablename = "pr_person"
    table = s3db[tablename]
    # Users may not delete their own person record
    s3db.configure(tablename,
                   deletable=False)
    # Configure for personal mode
    s3.crud_strings[tablename].update(
        title_display = T("Personal Profile"),
        title_update = T("Personal Profile"))
    # CRUD pre-process
    def prep(r):
        if r.interactive and r.method != "import":
            if r.component:
                if r.component_name == "physical_description":
                    # Hide all but those details that we want
                    # Lock all the fields
                    table = r.component.table
                    for field in table.fields:
                        table[field].writable = table[field].readable = False
                    # Now enable those that we want
                    table.ethnicity.writable = table.ethnicity.readable = True
                    table.blood_type.writable = table.blood_type.readable = True
                    table.medical_conditions.writable = table.medical_conditions.readable = True
                    table.other_details.writable = table.other_details.readable = True
            else:
                # Main record: hide administrative fields
                table = r.table
                table.pe_label.readable = table.pe_label.writable = False
                table.missing.readable = table.missing.writable = False
                table.age_group.readable = table.age_group.writable = False
                # Assume volunteers only between 12-81
                table.date_of_birth.widget = S3DateWidget(past=972, future=-144)
            return True
        else:
            # Disable non-interactive & import
            return False
    s3.prep = prep
    # CRUD post-process
    def postp(r, output):
        if r.interactive and r.component:
            if r.component_name == "human_resource":
                # Set the minimum end_date to the same as the start_date
                s3.jquery_ready.append(
'''S3.start_end_date('hrm_human_resource_start_date','hrm_human_resource_end_date')''')
            if r.component_name == "experience":
                # Set the minimum end_date to the same as the start_date
                s3.jquery_ready.append(
'''S3.start_end_date('hrm_experience_start_date','hrm_experience_end_date')''')
        return output
    s3.postp = postp
    output = s3_rest_controller("pr", "person",
                                rheader=s3db.hrm_rheader,
                                )
    return output
# -----------------------------------------------------------------------------
def hr_search():
    """
        Human Resource REST controller
        - restricted to the search_ac method (Autocomplete widgets)
        - allows differential access permissions
    """
    # Restrict results by HR type when a group is requested
    type_by_group = {"staff": 1, "volunteer": 2}
    hr_type = type_by_group.get(get_vars.get("group", None))
    if hr_type is not None:
        s3.filter = FS("human_resource.type") == hr_type
    # Reject everything except autocomplete lookups
    s3.prep = lambda r: r.method == "search_ac"
    return s3_rest_controller("hrm", "human_resource")
# -----------------------------------------------------------------------------
def person_search():
    """
        Person REST controller
        - restricted to the search_ac method (Autocomplete widgets)
        - allows differential access permissions
    """
    # Restrict results by HR type when a group is requested
    type_by_group = {"staff": 1, "volunteer": 2}
    hr_type = type_by_group.get(get_vars.get("group", None))
    if hr_type is not None:
        s3.filter = FS("human_resource.type") == hr_type
    # Reject everything except autocomplete lookups
    s3.prep = lambda r: r.method == "search_ac"
    return s3_rest_controller("pr", "person")
# =============================================================================
# Teams
# =============================================================================
def group():
    """
        Team controller
        - uses the group table from PR

        Thin wrapper delegating to the shared controller in s3db.
    """
    return s3db.hrm_group_controller()
# -----------------------------------------------------------------------------
def group_membership():
    """
        Membership controller
        - uses the group_membership table from PR

        Restricted to memberships of Relief Teams (group_type 3) held by
        Staff (HR type 1); system groups are excluded.
    """
    # Change Labels & list_fields
    s3db.hrm_configure_pr_group_membership()
    # Only show Relief Teams
    # Do not show system groups
    # Only show Staff
    table = db.pr_group_membership
    gtable = db.pr_group
    htable = s3db.hrm_human_resource
    s3.filter = (gtable.system == False) & \
                (gtable.group_type == 3) & \
                (htable.type == 1) & \
                (htable.person_id == table.person_id)
    def prep(r):
        if r.method in ("create", "create.popup", "update", "update.popup"):
            # Coming from Profile page?
            person_id = get_vars.get("~.person_id", None)
            if person_id:
                # Fix & lock the person when pre-selected by the caller
                field = table.person_id
                field.default = person_id
                field.readable = field.writable = False
        return True
    s3.prep = prep
    output = s3_rest_controller("pr", "group_membership",
                                csv_template="group_membership",
                                csv_stylesheet=("hrm", "group_membership.xsl"),
                                )
    return output
# =============================================================================
# Jobs
# =============================================================================
def department():
    """ Departments Controller """
    personal_mode = session.s3.hrm.mode
    def prep(r):
        # Departments are not accessible in personal-profile mode
        if personal_mode is not None:
            r.error(403, message=auth.permission.INSUFFICIENT_PRIVILEGES)
        return True
    s3.prep = prep
    # Non-admins only see their own root organisation's departments
    if not auth.s3_has_role(ADMIN):
        s3.filter = auth.filter_by_root_org(s3db.hrm_department)
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def job_title():
    """ Job Titles Controller """
    personal_mode = session.s3.hrm.mode
    def prep(r):
        # Job titles are not accessible in personal-profile mode
        if personal_mode is not None:
            r.error(403, message=auth.permission.INSUFFICIENT_PRIVILEGES)
        return True
    s3.prep = prep
    # Staff (1) and "both" (3) job titles only
    s3.filter = FS("type").belongs((1, 3))
    # Non-admins only see their own root organisation's job titles
    if not auth.s3_has_role(ADMIN):
        s3.filter &= auth.filter_by_root_org(s3db.hrm_job_title)
    return s3_rest_controller()
# =============================================================================
# Skills
# =============================================================================
def skill():
    """ Skills Controller """
    # Not available in personal-profile mode
    if session.s3.hrm.mode is not None:
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def skill_type():
    """ Skill Types Controller """
    # Not available in personal-profile mode
    if session.s3.hrm.mode is not None:
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def competency_rating():
    """ Competency Rating for Skill Types Controller """
    # Not available in personal-profile mode
    if session.s3.hrm.mode is not None:
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def skill_provision():
    """ Skill Provisions Controller """
    # Not available in personal-profile mode
    if session.s3.hrm.mode is not None:
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def course():
    """ Courses Controller """
    # Not available in personal-profile mode
    if session.s3.hrm.mode is not None:
        session.error = T("Access denied")
        redirect(URL(f="index"))
    # Non-admins only see their own root organisation's courses
    if not auth.s3_has_role(ADMIN):
        s3.filter = auth.filter_by_root_org(s3db.hrm_course)
    return s3_rest_controller(rheader=s3db.hrm_rheader)
# -----------------------------------------------------------------------------
def course_certificate():
    """ Courses to Certificates Controller """
    # Not available in personal-profile mode
    if session.s3.hrm.mode is not None:
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def certificate():
    """ Certificates Controller """
    personal_mode = session.s3.hrm.mode
    def prep(r):
        # Certificates are not accessible in personal-profile mode
        if personal_mode is not None:
            r.error(403, message=auth.permission.INSUFFICIENT_PRIVILEGES)
        return True
    s3.prep = prep
    # Optionally restrict non-admins to their own root organisation
    if settings.get_hrm_filter_certificates() and \
       not auth.s3_has_role(ADMIN):
        s3.filter = auth.filter_by_root_org(s3db.hrm_certificate)
    return s3_rest_controller(rheader=s3db.hrm_rheader)
# -----------------------------------------------------------------------------
def certificate_skill():
    """ Certificates to Skills Controller """
    # Not available in personal-profile mode
    if session.s3.hrm.mode is not None:
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def training():
    """ Training Controller - used for Searching for Participants """
    # Restrict to Staff (HR type 1) before delegating to the shared controller
    s3.filter = FS("person_id$human_resource.type") == 1
    return s3db.hrm_training_controller()
# -----------------------------------------------------------------------------
def training_event():
    """ Training Events Controller - thin wrapper around the shared controller """
    return s3db.hrm_training_event_controller()
# -----------------------------------------------------------------------------
def credential():
    """ Credentials Controller """
    # Restrict to Staff (HR type 1) before delegating to the shared controller
    s3.filter = FS("person_id$human_resource.type") == 1
    return s3db.hrm_credential_controller()
# -----------------------------------------------------------------------------
def experience():
    """ Experience Controller """
    # Restrict to Staff (HR type 1) before delegating to the shared controller
    s3.filter = FS("person_id$human_resource.type") == 1
    return s3db.hrm_experience_controller()
# -----------------------------------------------------------------------------
def competency():
    """
        RESTful CRUD controller used to allow searching for people by Skill
    """
    # Restrict to Staff (HR type 1) before delegating to the shared controller
    s3.filter = FS("person_id$human_resource.type") == 1
    return s3db.hrm_competency_controller()
# =============================================================================
def skill_competencies():
    """
        Called by S3OptionsFilter to provide the competency options for a
        particular Skill Type

        Returns a JSON list of {id, name} competency ratings ordered by
        descending priority for the skill given in request.args[0].
    """
    stable = s3db.hrm_skill
    ttable = s3db.hrm_skill_type
    rtable = s3db.hrm_competency_rating
    # Ratings that belong to the skill type of the requested skill
    query = ((stable.id == request.args[0]) &
             (stable.skill_type_id == ttable.id) &
             (rtable.skill_type_id == stable.skill_type_id))
    rows = db(query).select(rtable.id,
                            rtable.name,
                            orderby=~rtable.priority)
    response.headers["Content-Type"] = "application/json"
    return rows.json()
# =============================================================================
def staff_org_site_json():
    """
        Used by the Asset - Assign to Person page

        Returns the site & organisation records of the person given in
        request.args[0] as JSON.
    """
    htable = s3db.hrm_human_resource
    otable = s3db.org_organisation
    # HR records of the person, joined to their organisation
    query = ((htable.person_id == request.args[0]) &
             (htable.organisation_id == otable.id))
    rows = db(query).select(htable.site_id,
                            otable.id,
                            otable.name)
    response.headers["Content-Type"] = "application/json"
    return rows.json()
# =============================================================================
def staff_for_site():
    """
        Used by the Req/Req/Create page
        - note that this returns Person IDs

        Returns a JSON list of {id, name} for active, non-expired staff
        assigned to the site given in request.args[0]; a JSON error
        message with status 400 if no site was provided.
    """
    try:
        site_id = request.args[0]
    except IndexError:
        # Was a bare except: which would also have hidden unrelated
        # programming errors; only a missing request arg is expected here.
        result = current.xml.json_message(False, 400, "No Site provided!")
    else:
        table = s3db.hrm_human_resource
        ptable = db.pr_person
        # Active (status 1), non-deleted staff at the site whose contract
        # has no end date or has not yet expired
        query = (table.site_id == site_id) & \
                (table.deleted == False) & \
                (table.status == 1) & \
                ((table.end_date == None) | \
                 (table.end_date > request.utcnow)) & \
                (ptable.id == table.person_id)
        rows = db(query).select(ptable.id,
                                ptable.first_name,
                                ptable.middle_name,
                                ptable.last_name,
                                orderby=ptable.first_name)
        result = []
        append = result.append
        for row in rows:
            append({"id" : row.id,
                    "name" : s3_fullname(row)
                    })
        result = json.dumps(result)
    response.headers["Content-Type"] = "application/json"
    return result
# =============================================================================
# Messaging
# =============================================================================
def compose():
    """ Send message to people/teams - thin wrapper around the shared method """
    return s3db.hrm_compose()
# END =========================================================================
| |
# Copyright 2014 David Tomaschik <david@systemoverlord.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import binascii
import flask
import functools
import hashlib
import re
from sqlalchemy import exc
from sqlalchemy.orm import exc as orm_exc
import subprocess
from pwntalk import models
from pwntalk.app import app, get_flag
def require_login(func):
    """Send to homepage if not logged in."""
    @functools.wraps(func)
    def _login_wrapper(*args, **kwargs):
        # Only invoke the view when a user is logged in
        if _check_login():
            return func(*args, **kwargs)
        return flask.redirect(flask.url_for('home'))
    return _login_wrapper
@app.route('/')
def home():
    """Public timeline: posts with no recipient (i.e. not direct messages)."""
    # Populate flask.g.user if a session exists; anonymous viewing is allowed
    _check_login()
    return _render_posts_page(
        models.Post.query.filter(models.Post.recipient == None))
@app.route('/login', methods=['POST'])
def login():
    """Authenticate by username/password and store the uid in the session."""
    username = flask.request.form['username']
    password = flask.request.form['password']
    try:
        user = models.User.query.filter(
            models.User.username == username,
            models.User.password == password
        ).one()
        flask.session['user'] = user.uid
    except orm_exc.NoResultFound:
        flask.flash('Invalid username and/or password.', 'warning')
    return flask.redirect(flask.url_for('home'))
@app.route('/logout', methods=['GET', 'POST'])
def logout():
    """Clear the login session and return to the homepage."""
    # pop() instead of del: a direct hit on /logout without an active
    # session would otherwise raise KeyError and return a 500.
    flask.session.pop('user', None)
    flask.flash('You are now logged out.', 'success')
    return flask.redirect(flask.url_for('home'))
@app.route('/register', methods=['POST'])
def register():
    """Create a new account and log the user straight in."""
    form = flask.request.form
    try:
        user = models.User.register(
            form['username'],
            form['email'],
            form['password'])
        flask.session['user'] = user.uid
        flask.flash('Registration successful.', 'success')
    except exc.IntegrityError:
        # Unique constraint on username/email violated
        flask.flash('Duplicate username or email.', 'danger')
    return flask.redirect(flask.url_for('home'))
@app.route('/profile', methods=['GET', 'POST'])
@require_login
def profile():
    """Display/update a user's tagline; awards a CTF flag on the win condition.

    NOTE(review): the uid is taken from the form, not from the session, so
    any logged-in user can edit any profile — this IDOR appears to be the
    intended challenge (editing root's tagline yields the flag).
    """
    _validate_csrf()
    flag = None
    if flask.request.method == 'POST':
        user = models.User.query.get(flask.request.form.get('uid'))
        if not user:
            flask.abort(404)
        user.tagline = flask.request.form.get('tagline')
        models.commit()
        flask.flash('Profile updated.', 'success')
        # Check for flag
        if user.username == 'root' and flask.g.user.username in user.tagline.split():
            flag = get_flag('user_profile_edited')
    return _render_page('profile.html', flag=flag, user=flask.g.user)
@app.route('/post', methods=['POST'])
@require_login
def post():
    """Create a post; an @username prefix turns it into a direct message."""
    _validate_csrf()
    text = flask.request.form['text']
    if not text:
        flask.flash('Text is required.', 'warning')
    elif len(text) > 200:
        flask.flash('Text cannot be more than 200 characters.', 'warning')
    else:
        recipient = None
        match = re.match('@([A-Za-z0-9_-]+)', text)
        if match:
            try:
                recipient = models.User.query.filter(
                    models.User.username == match.group(1)).one()
            except (orm_exc.NoResultFound, orm_exc.MultipleResultsFound):
                # Was a bare except: which would also have swallowed
                # unrelated errors (even KeyboardInterrupt); only "no
                # such user" is expected here.
                flask.flash('Could not find user for DM.', 'warning')
                return flask.redirect(flask.request.form['redir'])
        models.Post.post(flask.g.user, text, recipient)
    return flask.redirect(flask.request.form['redir'])
@app.route('/u/<username>')
def user_page(username):
    """Public profile page listing a user's non-DM posts."""
    _check_login()
    # SQLi
    # NOTE(review): deliberately vulnerable — *username* is interpolated
    # straight into the SQL string (this is a CTF target). A real app
    # must use bound parameters instead of string formatting.
    try:
        query = 'SELECT * FROM user WHERE (username=\'%s\')' % username
        user = models.User.query.from_statement(query).one()
    except (exc.OperationalError, orm_exc.NoResultFound):
        user = None
    if not user:
        flask.flash('No such user!', 'warning')
        return flask.make_response(_render_page('error.html'), 404)
    posts = user.posts.filter(models.Post.recipient_id == None)
    return _render_posts_page(posts, user_profile=user)
@app.route('/direct_messages')
@require_login
def direct_messages():
    """List posts addressed to the logged-in user."""
    posts = models.Post.query.filter(models.Post.recipient == flask.g.user)
    return _render_posts_page(posts, user_profile=flask.g.user)
@app.route('/status', methods=['GET', 'POST'])
@require_login
def status():
    """Admin status console gated by a forgeable cookie; awards a CTF flag.

    NOTE(review): the 'admin_status' cookie is authenticated with a bare
    md5(username|admin) digest and no secret key — forgeable by design;
    this is the intended 'admin_console' challenge.
    """
    def _make_admin_cookie(admin_value='False'):
        # base64("<username>|<admin_value>|md5(username|admin_value)")
        raw = '%s|%s' % (flask.g.user.username, admin_value)
        return base64.b64encode('%s|%s' % (raw, hashlib.md5(raw).hexdigest()))
    def _validate_admin_cookie(cookie):
        # Recompute the cookie from its claimed admin_value and compare
        parts = base64.b64decode(cookie).split('|')
        admin_value = parts[1]
        if cookie != _make_admin_cookie(admin_value):
            return False
        return admin_value == 'True'
    admin_cookie = flask.request.cookies.get('admin_status')
    if not admin_cookie or not _validate_admin_cookie(admin_cookie):
        # Hand out a valid non-admin cookie with the rejection
        resp = flask.make_response('Access Denied.', 403)
        resp.set_cookie('admin_status', _make_admin_cookie())
        return resp
    page = flask.request.values.get('page', 'uptime')
    # Sanitize this so users can't read everything
    try:
        # Hex-encode the page name and pass it through the sandbox wrapper
        hexpage = binascii.hexlify(page)
        wrapper = app.config.get('SANDBOX_BIN', 'tools/cmdwrapper')
        output = subprocess.check_output([wrapper, hexpage],
                                         shell=False)
    except Exception as ex:
        flask.flash('Invalid command: ' + str(ex), 'danger')
        return _render_page('error.html')
    return _render_page(
        'status.html', flag=get_flag('admin_console'), output=output)
@app.route('/robots.txt')
def robots_txt():
    # Serves the static robots.txt by opening the URL path ('/static/...')
    # as a relative filesystem path — presumably relies on the process CWD
    # being the application root; TODO confirm.
    return open(flask.url_for('static', filename='robots.txt')).read()
def _render_posts_page(posts, **kwargs):
    """Render the 20 newest posts of *posts*; awards the XSS flag when a DM
    from HaplessTechnoweenie to the current user is present."""
    flag = None
    if posts:
        posts = posts.order_by(models.Post.posted.desc()).limit(20)
        # Check for win
        if 'user' in flask.g:
            for post in posts:
                if (post.author.username == 'HaplessTechnoweenie' and
                    post.recipient == flask.g.user):
                    flag = get_flag('dom_based_xss')
    # TODO: pagination?
    return _render_page(
        'posts.html', posts=posts, flag=flag, **kwargs)
def _render_page(page, **kwargs):
    """Render *page* with a CSRF token added to the template context."""
    return flask.render_template(page, csrftoken=_csrf_token(), **kwargs)
def _check_login():
    """Populate flask.g.user from the session and return it.

    Returns None for anonymous requests. The original ended with a bare
    `return flask.g.user`, which raises AttributeError when no session
    exists and g.user was never set — callers such as require_login and
    _csrf_token clearly expect a falsy value instead.
    """
    if 'user' in flask.session and 'user' not in flask.g:
        flask.g.user = models.User.query.get(int(flask.session['user']))
    return getattr(flask.g, 'user', None)
def _csrf_token():
    """Return the CSRF token for the current user ('' when anonymous)."""
    _check_login()
    try:
        username = flask.g.user.username
    except AttributeError:
        # No logged-in user: no token
        return ''
    return base64.b64encode(
        '%s:%s' % (username, hashlib.md5(username).hexdigest()))
def _validate_csrf():
    """Abort with 403 when a non-GET request carries a wrong/missing token."""
    csrftoken = _csrf_token()
    if flask.request.method != 'GET' and (not csrftoken or
          flask.request.form['csrftoken'] != csrftoken):
        # Python 2 print statement (this module is py2-only)
        print 'csrf check failed, got %s, expected %s' % (
            flask.request.form['csrftoken'], csrftoken)
        flask.abort(403)
    return csrftoken
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib import constants as lib_constants
from oslo_utils import uuidutils
from neutron.agent.l3 import router_info
from neutron.agent.linux import ip_lib
from neutron.common import exceptions as n_exc
from neutron.conf.agent import common as config
from neutron.tests import base
_uuid = uuidutils.generate_uuid
class TestRouterInfo(base.BaseTestCase):
    """Unit tests for RouterInfo with IPWrapper mocked out.

    setUp patches neutron.agent.linux.ip_lib.IPWrapper so that all `ip`
    invocations are captured on self.mock_ip instead of hitting the system.
    """
    def setUp(self):
        super(TestRouterInfo, self).setUp()
        conf = config.setup_conf()
        # Intercept all IPWrapper instantiations with a shared MagicMock
        self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
        ip_cls = self.ip_cls_p.start()
        self.mock_ip = mock.MagicMock()
        ip_cls.return_value = self.mock_ip
        self.ri_kwargs = {'agent_conf': conf,
                          'interface_driver': mock.sentinel.interface_driver}
    def _check_agent_method_called(self, calls):
        # Assert that each expected `ip` command was executed in the netns
        self.mock_ip.netns.execute.assert_has_calls(
            [mock.call(call, check_exit_code=False) for call in calls],
            any_order=True)
    def test_routing_table_update(self):
        # replace/delete of two routes should issue matching `ip route` calls
        ri = router_info.RouterInfo(mock.Mock(), _uuid(), {}, **self.ri_kwargs)
        ri.router = {}
        fake_route1 = {'destination': '135.207.0.0/16',
                       'nexthop': '1.2.3.4'}
        fake_route2 = {'destination': '135.207.111.111/32',
                       'nexthop': '1.2.3.4'}
        ri.update_routing_table('replace', fake_route1)
        expected = [['ip', 'route', 'replace', 'to', '135.207.0.0/16',
                     'via', '1.2.3.4']]
        self._check_agent_method_called(expected)
        ri.update_routing_table('delete', fake_route1)
        expected = [['ip', 'route', 'delete', 'to', '135.207.0.0/16',
                     'via', '1.2.3.4']]
        self._check_agent_method_called(expected)
        ri.update_routing_table('replace', fake_route2)
        expected = [['ip', 'route', 'replace', 'to', '135.207.111.111/32',
                     'via', '1.2.3.4']]
        self._check_agent_method_called(expected)
        ri.update_routing_table('delete', fake_route2)
        expected = [['ip', 'route', 'delete', 'to', '135.207.111.111/32',
                     'via', '1.2.3.4']]
        self._check_agent_method_called(expected)
    def test_update_routing_table(self):
        # Just verify the correct namespace was used in the call
        uuid = _uuid()
        netns = 'qrouter-' + uuid
        fake_route1 = {'destination': '135.207.0.0/16',
                       'nexthop': '1.2.3.4'}
        ri = router_info.RouterInfo(mock.Mock(), uuid,
                                    {'id': uuid}, **self.ri_kwargs)
        ri._update_routing_table = mock.Mock()
        ri.update_routing_table('replace', fake_route1)
        ri._update_routing_table.assert_called_once_with('replace',
                                                         fake_route1,
                                                         netns)
    def test_routes_updated(self):
        # routes_updated should add new routes and remove dropped ones
        ri = router_info.RouterInfo(mock.Mock(), _uuid(), {}, **self.ri_kwargs)
        ri.router = {}
        fake_old_routes = []
        fake_new_routes = [{'destination': "110.100.31.0/24",
                            'nexthop': "10.100.10.30"},
                           {'destination': "110.100.30.0/24",
                            'nexthop': "10.100.10.30"}]
        ri.routes = fake_old_routes
        ri.router['routes'] = fake_new_routes
        ri.routes_updated(fake_old_routes, fake_new_routes)
        expected = [['ip', 'route', 'replace', 'to', '110.100.30.0/24',
                    'via', '10.100.10.30'],
                    ['ip', 'route', 'replace', 'to', '110.100.31.0/24',
                    'via', '10.100.10.30']]
        self._check_agent_method_called(expected)
        ri.routes = fake_new_routes
        fake_new_routes = [{'destination': "110.100.30.0/24",
                            'nexthop': "10.100.10.30"}]
        ri.router['routes'] = fake_new_routes
        ri.routes_updated(ri.routes, fake_new_routes)
        expected = [['ip', 'route', 'delete', 'to', '110.100.31.0/24',
                    'via', '10.100.10.30']]
        self._check_agent_method_called(expected)
        fake_new_routes = []
        ri.router['routes'] = fake_new_routes
        ri.routes_updated(ri.routes, fake_new_routes)
        expected = [['ip', 'route', 'delete', 'to', '110.100.30.0/24',
                    'via', '10.100.10.30']]
        self._check_agent_method_called(expected)
    def test__process_pd_iptables_rules(self):
        # A delegated prefix should install a tagged ip6tables mangle rule
        subnet_id = _uuid()
        ex_gw_port = {'id': _uuid()}
        prefix = '2001:db8:cafe::/64'
        ri = router_info.RouterInfo(mock.Mock(), _uuid(), {}, **self.ri_kwargs)
        ipv6_mangle = ri.iptables_manager.ipv6['mangle'] = mock.MagicMock()
        ri.get_ex_gw_port = mock.Mock(return_value=ex_gw_port)
        ri.get_external_device_name = mock.Mock(return_value='fake_device')
        ri.get_address_scope_mark_mask = mock.Mock(return_value='fake_mark')
        ri._process_pd_iptables_rules(prefix, subnet_id)
        mangle_rule = '-d %s ' % prefix
        mangle_rule += ri.address_scope_mangle_rule('fake_device', 'fake_mark')
        ipv6_mangle.add_rule.assert_called_once_with(
            'scope',
            mangle_rule,
            tag='prefix_delegation_%s' % subnet_id)
    def test_add_ports_address_scope_iptables(self):
        # process_address_scope should mark traffic of each internal port
        ri = router_info.RouterInfo(mock.Mock(), _uuid(), {}, **self.ri_kwargs)
        port = {
            'id': _uuid(),
            'fixed_ips': [{'ip_address': '172.9.9.9'}],
            'address_scopes': {lib_constants.IP_VERSION_4: '1234'}
        }
        ipv4_mangle = ri.iptables_manager.ipv4['mangle'] = mock.MagicMock()
        ri.get_address_scope_mark_mask = mock.Mock(return_value='fake_mark')
        ri.get_internal_device_name = mock.Mock(return_value='fake_device')
        ri.rt_tables_manager = mock.MagicMock()
        ri.process_external_port_address_scope_routing = mock.Mock()
        ri.process_floating_ip_address_scope_rules = mock.Mock()
        ri.iptables_manager._apply = mock.Mock()
        ri.router[lib_constants.INTERFACE_KEY] = [port]
        ri.process_address_scope()
        ipv4_mangle.add_rule.assert_called_once_with(
            'scope', ri.address_scope_mangle_rule('fake_device', 'fake_mark'))
    def test_address_scope_mark_ids_handling(self):
        mark_ids = set(range(router_info.ADDRESS_SCOPE_MARK_ID_MIN,
                             router_info.ADDRESS_SCOPE_MARK_ID_MAX))
        ri = router_info.RouterInfo(mock.Mock(), _uuid(), {}, **self.ri_kwargs)
        # first mark id is used for the default address scope
        scope_to_mark_id = {router_info.DEFAULT_ADDRESS_SCOPE: mark_ids.pop()}
        self.assertEqual(scope_to_mark_id, ri._address_scope_to_mark_id)
        self.assertEqual(mark_ids, ri.available_mark_ids)
        # new id should be used for new address scope
        ri.get_address_scope_mark_mask('new_scope')
        scope_to_mark_id['new_scope'] = mark_ids.pop()
        self.assertEqual(scope_to_mark_id, ri._address_scope_to_mark_id)
        self.assertEqual(mark_ids, ri.available_mark_ids)
        # new router should have it's own mark ids set
        new_mark_ids = set(range(router_info.ADDRESS_SCOPE_MARK_ID_MIN,
                                 router_info.ADDRESS_SCOPE_MARK_ID_MAX))
        new_ri = router_info.RouterInfo(mock.Mock(), _uuid(),
                                        {}, **self.ri_kwargs)
        new_mark_ids.pop()
        self.assertEqual(new_mark_ids, new_ri.available_mark_ids)
        self.assertTrue(ri.available_mark_ids != new_ri.available_mark_ids)
    def test_process_delete(self):
        # Port processing must only happen when the namespace still exists
        ri = router_info.RouterInfo(mock.Mock(), _uuid(), {}, **self.ri_kwargs)
        ri.router = {'id': _uuid()}
        with mock.patch.object(ri, '_process_internal_ports') as p_i_p,\
                mock.patch.object(ri, '_process_external_on_delete') as p_e_o_d:
            self.mock_ip.netns.exists.return_value = False
            ri.process_delete()
            self.assertFalse(p_i_p.called)
            self.assertFalse(p_e_o_d.called)
            p_i_p.reset_mock()
            p_e_o_d.reset_mock()
            self.mock_ip.netns.exists.return_value = True
            ri.process_delete()
            p_i_p.assert_called_once_with()
            p_e_o_d.assert_called_once_with()
class BasicRouterTestCaseFramework(base.BaseTestCase):
    """Base fixture providing a RouterInfo factory for router unit tests."""

    def _create_router(self, router=None, **kwargs):
        """Build a RouterInfo around *router* (a fresh MagicMock if None)."""
        if not router:
            router = mock.MagicMock()
        # Exposed for assertions in subclasses
        self.agent_conf = mock.Mock()
        self.router_id = _uuid()
        return router_info.RouterInfo(mock.Mock(),
                                      self.router_id,
                                      router,
                                      self.agent_conf,
                                      mock.sentinel.interface_driver,
                                      **kwargs)
class TestBasicRouterOperations(BasicRouterTestCaseFramework):
    def test_get_floating_ips(self):
        """get_floating_ips() should return whatever the router dict holds."""
        router = mock.MagicMock()
        router.get.return_value = [mock.sentinel.floating_ip]
        ri = self._create_router(router)
        fips = ri.get_floating_ips()
        self.assertEqual([mock.sentinel.floating_ip], fips)
    def test_process_floating_ip_nat_rules(self):
        """NAT rules: old rules cleared first, new rule added, apply() last."""
        ri = self._create_router()
        fips = [{'fixed_ip_address': mock.sentinel.ip,
                 'floating_ip_address': mock.sentinel.fip}]
        ri.get_floating_ips = mock.Mock(return_value=fips)
        ri.iptables_manager = mock.MagicMock()
        ipv4_nat = ri.iptables_manager.ipv4['nat']
        ri.floating_forward_rules = mock.Mock(
            return_value=[(mock.sentinel.chain, mock.sentinel.rule)])
        ri.process_floating_ip_nat_rules()
        # Be sure that the rules are cleared first and apply is called last
        self.assertEqual(mock.call.clear_rules_by_tag('floating_ip'),
                         ipv4_nat.mock_calls[0])
        self.assertEqual(mock.call.apply(), ri.iptables_manager.mock_calls[-1])
        # Be sure that add_rule is called somewhere in the middle
        ipv4_nat.add_rule.assert_called_once_with(mock.sentinel.chain,
                                                  mock.sentinel.rule,
                                                  tag='floating_ip')
    def test_process_floating_ip_nat_rules_removed(self):
        """With no floating IPs, rules are cleared/applied but none are added."""
        ri = self._create_router()
        ri.get_floating_ips = mock.Mock(return_value=[])
        ri.iptables_manager = mock.MagicMock()
        ipv4_nat = ri.iptables_manager.ipv4['nat']
        ri.process_floating_ip_nat_rules()
        # Be sure that the rules are cleared first and apply is called last
        self.assertEqual(mock.call.clear_rules_by_tag('floating_ip'),
                         ipv4_nat.mock_calls[0])
        self.assertEqual(mock.call.apply(), ri.iptables_manager.mock_calls[-1])
        # Be sure that add_rule is called somewhere in the middle
        self.assertFalse(ipv4_nat.add_rule.called)
def test_process_floating_ip_address_scope_rules_diff_scopes(self):
ri = self._create_router()
fips = [{'fixed_ip_address': mock.sentinel.ip,
'floating_ip_address': mock.sentinel.fip,
'fixed_ip_address_scope': 'scope1'}]
ri.get_floating_ips = mock.Mock(return_value=fips)
ri._get_external_address_scope = mock.Mock(return_value='scope2')
ipv4_mangle = ri.iptables_manager.ipv4['mangle'] = mock.MagicMock()
ri.floating_mangle_rules = mock.Mock(
return_value=[(mock.sentinel.chain1, mock.sentinel.rule1)])
ri.get_external_device_name = mock.Mock()
ri.process_floating_ip_address_scope_rules()
# Be sure that the rules are cleared first
self.assertEqual(mock.call.clear_rules_by_tag('floating_ip'),
ipv4_mangle.mock_calls[0])
# Be sure that add_rule is called somewhere in the middle
self.assertEqual(1, ipv4_mangle.add_rule.call_count)
self.assertEqual(mock.call.add_rule(mock.sentinel.chain1,
mock.sentinel.rule1,
tag='floating_ip'),
ipv4_mangle.mock_calls[1])
def test_process_floating_ip_address_scope_rules_same_scopes(self):
ri = self._create_router()
fips = [{'fixed_ip_address': mock.sentinel.ip,
'floating_ip_address': mock.sentinel.fip,
'fixed_ip_address_scope': 'scope1'}]
ri.get_floating_ips = mock.Mock(return_value=fips)
ri._get_external_address_scope = mock.Mock(return_value='scope1')
ipv4_mangle = ri.iptables_manager.ipv4['mangle'] = mock.MagicMock()
ri.process_floating_ip_address_scope_rules()
# Be sure that the rules are cleared first
self.assertEqual(mock.call.clear_rules_by_tag('floating_ip'),
ipv4_mangle.mock_calls[0])
# Be sure that add_rule is not called somewhere in the middle
self.assertFalse(ipv4_mangle.add_rule.called)
def test_process_floating_ip_mangle_rules_removed(self):
ri = self._create_router()
ri.get_floating_ips = mock.Mock(return_value=[])
ipv4_mangle = ri.iptables_manager.ipv4['mangle'] = mock.MagicMock()
ri.process_floating_ip_address_scope_rules()
# Be sure that the rules are cleared first
self.assertEqual(mock.call.clear_rules_by_tag('floating_ip'),
ipv4_mangle.mock_calls[0])
# Be sure that add_rule is not called somewhere in the middle
self.assertFalse(ipv4_mangle.add_rule.called)
def _test_add_fip_addr_to_device_error(self, device):
ri = self._create_router()
ip = '15.1.2.3'
result = ri._add_fip_addr_to_device(
{'id': mock.sentinel.id, 'floating_ip_address': ip}, device)
device.addr.add.assert_called_with(ip + '/32')
return result
def test__add_fip_addr_to_device(self):
result = self._test_add_fip_addr_to_device_error(mock.Mock())
self.assertTrue(result)
def test__add_fip_addr_to_device_error(self):
device = mock.Mock()
device.addr.add.side_effect = RuntimeError
result = self._test_add_fip_addr_to_device_error(device)
self.assertFalse(result)
def test_process_snat_dnat_for_fip(self):
ri = self._create_router()
ri.process_floating_ip_nat_rules = mock.Mock(side_effect=Exception)
self.assertRaises(n_exc.FloatingIpSetupException,
ri.process_snat_dnat_for_fip)
ri.process_floating_ip_nat_rules.assert_called_once_with()
def test_put_fips_in_error_state(self):
ri = self._create_router()
ri.router = mock.Mock()
ri.router.get.return_value = [{'id': mock.sentinel.id1},
{'id': mock.sentinel.id2}]
statuses = ri.put_fips_in_error_state()
expected = [{mock.sentinel.id1: lib_constants.FLOATINGIP_STATUS_ERROR,
mock.sentinel.id2: lib_constants.FLOATINGIP_STATUS_ERROR}]
self.assertNotEqual(expected, statuses)
def test_configure_fip_addresses(self):
ri = self._create_router()
ri.process_floating_ip_addresses = mock.Mock(
side_effect=Exception)
self.assertRaises(n_exc.FloatingIpSetupException,
ri.configure_fip_addresses,
mock.sentinel.interface_name)
ri.process_floating_ip_addresses.assert_called_once_with(
mock.sentinel.interface_name)
def test_get_router_cidrs_returns_cidrs(self):
ri = self._create_router()
addresses = ['15.1.2.2/24', '15.1.2.3/32']
device = mock.MagicMock()
device.addr.list.return_value = [{'cidr': addresses[0]},
{'cidr': addresses[1]}]
self.assertEqual(set(addresses), ri.get_router_cidrs(device))
@mock.patch.object(ip_lib, 'IPDevice')
class TestFloatingIpWithMockDevice(BasicRouterTestCaseFramework):
    """Floating-IP processing tests that patch ``ip_lib.IPDevice``.

    The class decorator injects the patched IPDevice class into every
    test method, letting each test control which CIDRs the external
    device reports via ``device.addr.list``.
    """

    def test_process_floating_ip_addresses_remap(self, IPDevice):
        # The fip address is already present on the device, so the fip
        # should be reported ACTIVE without touching the device addrs.
        fip_id = _uuid()
        fip = {
            'id': fip_id, 'port_id': _uuid(),
            'floating_ip_address': '15.1.2.3',
            'fixed_ip_address': '192.168.0.2',
            'status': lib_constants.FLOATINGIP_STATUS_DOWN
        }
        IPDevice.return_value = device = mock.Mock()
        device.addr.list.return_value = [{'cidr': '15.1.2.3/32'}]
        ri = self._create_router()
        ri.get_floating_ips = mock.Mock(return_value=[fip])
        fip_statuses = ri.process_floating_ip_addresses(
            mock.sentinel.interface_name)
        self.assertEqual({fip_id: lib_constants.FLOATINGIP_STATUS_ACTIVE},
                         fip_statuses)
        self.assertFalse(device.addr.add.called)
        self.assertFalse(device.addr.delete.called)

    def test_process_router_with_disabled_floating_ip(self, IPDevice):
        # The fip is known to the router but no longer returned by
        # get_floating_ips, so no status should be reported for it.
        fip_id = _uuid()
        fip = {
            'id': fip_id, 'port_id': _uuid(),
            'floating_ip_address': '15.1.2.3',
            'fixed_ip_address': '192.168.0.2'
        }
        ri = self._create_router()
        ri.floating_ips = [fip]
        ri.get_floating_ips = mock.Mock(return_value=[])
        fip_statuses = ri.process_floating_ip_addresses(
            mock.sentinel.interface_name)
        self.assertIsNone(fip_statuses.get(fip_id))

    def test_process_router_floating_ip_with_device_add_error(self, IPDevice):
        # add_floating_ip is stubbed to report ERROR; that status must be
        # propagated in the returned mapping.
        IPDevice.return_value = device = mock.Mock(side_effect=RuntimeError)
        device.addr.list.return_value = []
        fip_id = _uuid()
        fip = {
            'id': fip_id, 'port_id': _uuid(),
            'floating_ip_address': '15.1.2.3',
            'fixed_ip_address': '192.168.0.2',
            'status': 'DOWN'
        }
        ri = self._create_router()
        ri.add_floating_ip = mock.Mock(
            return_value=lib_constants.FLOATINGIP_STATUS_ERROR)
        ri.get_floating_ips = mock.Mock(return_value=[fip])
        fip_statuses = ri.process_floating_ip_addresses(
            mock.sentinel.interface_name)
        self.assertEqual({fip_id: lib_constants.FLOATINGIP_STATUS_ERROR},
                         fip_statuses)

    # TODO(mrsmith): refactor for DVR cases
    def test_process_floating_ip_addresses_remove(self, IPDevice):
        # An address on the device with no matching fip must be removed.
        IPDevice.return_value = device = mock.Mock()
        device.addr.list.return_value = [{'cidr': '15.1.2.3/32'}]
        ri = self._create_router()
        ri.remove_floating_ip = mock.Mock()
        ri.router.get = mock.Mock(return_value=[])
        fip_statuses = ri.process_floating_ip_addresses(
            mock.sentinel.interface_name)
        self.assertEqual({}, fip_statuses)
        ri.remove_floating_ip.assert_called_once_with(device, '15.1.2.3/32')

    def test_process_floating_ip_reassignment(self, IPDevice):
        # Same floating IP, different fixed IP than recorded in fip_map:
        # the fip must be moved rather than re-added.
        IPDevice.return_value = device = mock.Mock()
        device.addr.list.return_value = [{'cidr': '15.1.2.3/32'}]
        fip_id = _uuid()
        fip = {
            'id': fip_id, 'port_id': _uuid(),
            'floating_ip_address': '15.1.2.3',
            'fixed_ip_address': '192.168.0.3',
            'status': 'DOWN'
        }
        ri = self._create_router()
        ri.get_floating_ips = mock.Mock(return_value=[fip])
        ri.move_floating_ip = mock.Mock()
        ri.fip_map = {'15.1.2.3': '192.168.0.2'}
        ri.process_floating_ip_addresses(mock.sentinel.interface_name)
        ri.move_floating_ip.assert_called_once_with(fip)

    def test_process_floating_ip_addresses_gw_secondary_ip_not_removed(
            self, IPDevice):
        # Gateway-port fixed IPs (1.1.1.1, 2.2.2.2) and configured fips
        # (3.3.3.3) must survive; only the orphan 4.4.4.4 is removed.
        IPDevice.return_value = device = mock.Mock()
        device.addr.list.return_value = [{'cidr': '1.1.1.1/16'},
                                         {'cidr': '2.2.2.2/32'},
                                         {'cidr': '3.3.3.3/32'},
                                         {'cidr': '4.4.4.4/32'}]
        ri = self._create_router()
        ri.get_floating_ips = mock.Mock(return_value=[
            {'id': _uuid(),
             'floating_ip_address': '3.3.3.3',
             'status': 'DOWN'}])
        ri.add_floating_ip = mock.Mock()
        ri.get_ex_gw_port = mock.Mock(return_value={
            "fixed_ips": [{"ip_address": "1.1.1.1"},
                          {"ip_address": "2.2.2.2"}]})
        ri.remove_floating_ip = mock.Mock()
        ri.process_floating_ip_addresses("qg-fake-device")
        ri.remove_floating_ip.assert_called_once_with(device, '4.4.4.4/32')
| |
# Despy: A discrete event simulation framework for Python
# Version 0.1
# Released under the MIT License (MIT)
# Copyright (c) 2015, Stacy Irwin
"""
******************
despy.model.session
******************
.. autosummary::
Session
Config
"""
import enum
from despy.abstract.model import AbstractModel
#: Output rendering formats supported by the console trace.
Format = enum.Enum("Format", {"text": 1, "html": 2})
class Config(object):
    """Stores the settings that control a simulation run.

    **Members**

    .. autosummary::

        trace_start
        trace_stop
        trace_max_length
        console_trace
        folder_basename
        reps
        initial_time
        seed
    """

    def __init__(self):
        """Construct a Config object with default settings."""
        # Public attributes
        self.folder_basename = None      # No file output until set.
        self.write_files = True
        self.console_trace = False
        self.console_format = Format.text
        self._trace_start = 0
        self._trace_stop = 500
        self._trace_max_length = 1000
        # (first, last) replications recorded by the trace -- not read in
        # this module; presumably consumed by the trace writer. TODO confirm.
        self._trace_reps = (0, 1)
        self._reps = 1
        self.initial_time = 0
        self._seed = None
        self._full_path = None

    @property
    def trace_start(self):
        """Trace starts recording at this simulation time. Default = 0.

        *Type:* Integer
        """
        return self._trace_start

    @trace_start.setter
    def trace_start(self, start):
        self._trace_start = start

    @property
    def trace_max_length(self):
        """Max number of TraceRecords in Trace object. Default = 1000.

        *Type:* Integer
        """
        return self._trace_max_length

    @trace_max_length.setter
    def trace_max_length(self, max_length):
        self._trace_max_length = max_length

    @property
    def trace_stop(self):
        """Trace stops recording at this simulation time. Default = 500.

        *Type:* Integer
        """
        return self._trace_stop

    @trace_stop.setter
    def trace_stop(self, stop):
        # Values not greater than trace_start are silently ignored.
        # Non-numeric values raise TypeError from the comparison (or from
        # round) and are likewise ignored, keeping the previous stop time.
        # The previous bare ``except`` also swallowed KeyboardInterrupt
        # and SystemExit; only TypeError is expected here.
        try:
            if stop > self.trace_start:
                self._trace_stop = round(stop)
        except TypeError:
            pass

    @property
    def console_trace(self):
        """If True, send Trace data to console output. Default = False.

        *Type:* Boolean
        """
        return self._console_trace

    @console_trace.setter
    def console_trace(self, console_trace):
        self._console_trace = console_trace

    @property
    def write_files(self):
        """Disables writing of output files if False.

        *Type:* Boolean
        """
        return self._write_files

    @write_files.setter
    def write_files(self, write):
        self._write_files = write

    @property
    def folder_basename(self):
        """Folder where output reports and graphs will be placed.

        If ``None`` (the default value), the simulation will not
        generate any output or trace files. The value stored in
        ``folder_basename`` should be an absolute reference.
        For example::

            gen.folder_basename = "C:/despy_output/resource_sim"

        The Generator object will add a time-stamp to the end of the
        folder name when generating the output files. This allows the
        user to run the simulation several times without overwriting
        data from previous simulations.
        """
        return self._folder_basename

    @folder_basename.setter
    def folder_basename(self, basename):
        self._folder_basename = basename

    @property
    def reps(self):
        """Number of replications in simulation. Default = 1.

        *Type:* Integer
        """
        return self._reps

    @reps.setter
    def reps(self, reps):
        self._reps = reps

    @property
    def initial_time(self):
        """Earliest time in simulation. Default = 0.

        *Type:* Integer
        """
        return self._initial_time

    @initial_time.setter
    def initial_time(self, initial_time):
        self._initial_time = initial_time

    @property
    def seed(self):
        """Seed for both the numpy and standard-library random modules.

        Set seed to an integer, or to ``None`` (default).

        By default (i.e., when seed is set to None), Despy will use a
        different seed, and hence a different random number sequence for
        each run of the simulation. For troubleshooting or testing
        purposes, it's often useful to repeatedly run the simulation
        with the same sequence of random numbers. This can be
        accomplished by setting the seed variable.

        Designers should use this seed property when seeding the random
        number generators. While despy will use the numpy random number
        generator instead of the generator built into Python's random
        module, we can't guarantee that Python random module functions
        won't sneak into a custom subclass. The numpy and Python random
        number generators use different random number sequences, so it's
        necessary to seed both generators to ensure a consistent random
        number sequence throughout the simulation.
        """
        return self._seed

    @seed.setter
    def seed(self, seed):
        self._seed = seed
class Session:
    """Singleton-style access point tying Simulation, Model, and Config.

    Every ``Session()`` call yields a facade backed by the same shared
    state, so the Simulation and Model objects can reach each other and
    the Config object through Session properties.

    A designer starting a fresh simulation can call the static method
    :meth:`Session.new` to discard the shared state, ensuring that no
    old configuration or session data is inadvertently brought forward
    into the new session.

    **Properties**

    .. autosummary::

        sim
        model
        config

    **Methods**

    .. autosummary

        new

    **Raises**

    * :class:`TypeError` if object other than AbstractModel is passed
      to Session.model property.
    """

    #: Shared backing state; every Session facade reads and writes it.
    _instance = None

    def __init__(self):
        """Bind this facade to the shared session state.

        The shared state object is created lazily on first
        construction. If the designer replaces it via Session.new(),
        subsequent Session() calls see the new state.
        """
        if Session._instance is None:
            Session._instance = Session.__Session()

    @property
    def sim(self):
        """Current assigned Simulation object.

        *Type:* :class:`despy.simulation.Simulation` object.
        """
        return self._instance._sim

    @sim.setter
    def sim(self, value):
        self._instance._sim = value

    @property
    def model(self):
        """Currently assigned model (top-level component) object.

        *Type:* :class:`despy.model.abstract.AbstractModel`
        """
        return self._instance._model

    @model.setter
    def model(self, value):
        # Guard clause: reject anything that is not an AbstractModel.
        if not isinstance(value, AbstractModel):
            raise TypeError("Session.model must be set to "
                            "instance of despy.model.abstract.AbstractModel. "
                            "{} was provided instead.".format(type(value)))
        self._instance._model = value

    @property
    def config(self):
        """Current configuration object.

        *Type:* :class:`despy.session.Config`
        """
        return self._instance._config

    @config.setter
    def config(self, value):
        self._instance._config = value

    @property
    def results(self):
        """Provides access to simulation output.

        *Type:* :class:`despy.output.results.Results`
        """
        return self._instance._results

    @results.setter
    def results(self, value):
        self._instance._results = value

    @staticmethod
    def new(config=None):
        """Creates and returns a new Session instance.
        """
        Session._instance = Session.__Session()
        if config is not None:
            Session._instance._config = config
        return Session()

    class __Session:
        """Private holder for the state shared by all Session facades."""

        def __init__(self):
            self._sim = None
            self._model = None
            self._config = Config()
            self._results = None
| |
import copy
import math
from django.test import TestCase
from mock import patch, call, MagicMock
from graphite.render.datalib import TimeSeries
from graphite.render import functions
def return_greater(series, value):
    """Return the non-None elements of *series* strictly greater than *value*."""
    matches = []
    for item in series:
        if item is None:
            continue
        if item > value:
            matches.append(item)
    return matches
def return_less(series, value):
    """Return the non-None elements of *series* strictly less than *value*."""
    matches = []
    for item in series:
        if item is None:
            continue
        if item < value:
            matches.append(item)
    return matches
class FunctionsTest(TestCase):
    """Unit tests for the render functions in graphite.render.functions."""

    def test_highest_max(self):
        config = [20, 50, 30, 40]
        seriesList = [range(max_val) for max_val in config]

        # Expect the test results to be returned in descending order
        expected = [
            [seriesList[1]],
            [seriesList[1], seriesList[3]],
            [seriesList[1], seriesList[3], seriesList[2]],
            # Test where num_return == len(seriesList)
            [seriesList[1], seriesList[3], seriesList[2], seriesList[0]],
            # Test where num_return > len(seriesList)
            [seriesList[1], seriesList[3], seriesList[2], seriesList[0]],
        ]
        for index, test in enumerate(expected):
            results = functions.highestMax({}, seriesList, index + 1)
            self.assertEqual(test, results)

    def test_highest_max_empty_series_list(self):
        # Test the function works properly with an empty seriesList provided.
        self.assertEqual([], functions.highestMax({}, [], 1))

    def testGetPercentile(self):
        seriesList = [
            ([None, None, 15, 20, 35, 40, 50], 20),
            (range(100), 30),
            (range(200), 60),
            (range(300), 90),
            (range(1, 101), 31),
            (range(1, 201), 61),
            (range(1, 301), 91),
            (range(0, 102), 30),
            (range(1, 203), 61),
            (range(1, 303), 91),
        ]
        for index, conf in enumerate(seriesList):
            series, expected = conf
            result = functions._getPercentile(series, 30)
            self.assertEqual(expected, result, 'For series index <%s> the 30th percentile ordinal is not %d, but %d ' % (index, expected, result))

    def test_n_percentile(self):
        seriesList = []
        config = [
            [15, 35, 20, 40, 50],
            range(1, 101),
            range(1, 201),
            range(1, 301),
            range(0, 100),
            range(0, 200),
            range(0, 300),
            # Ensure None values in list has no effect.
            [None, None, None] + range(0, 300),
        ]

        for i, c in enumerate(config):
            seriesList.append(TimeSeries('Test(%d)' % i, 0, 1, 1, c))

        def n_percentile(perc, expected):
            result = functions.nPercentile({}, seriesList, perc)
            self.assertEqual(expected, result)

        n_percentile(30, [[20], [31], [61], [91], [30], [60], [90], [90]])
        n_percentile(90, [[50], [91], [181], [271], [90], [180], [270], [270]])
        n_percentile(95, [[50], [96], [191], [286], [95], [190], [285], [285]])

    def test_sorting_by_total(self):
        seriesList = []
        config = [[1000, 100, 10, 0], [1000, 100, 10, 1]]
        for i, c in enumerate(config):
            seriesList.append(TimeSeries('Test(%d)' % i, 0, 0, 0, c))

        self.assertEqual(1110, functions.safeSum(seriesList[0]))

        result = functions.sortByTotal({}, seriesList)

        self.assertEqual(1111, functions.safeSum(result[0]))
        self.assertEqual(1110, functions.safeSum(result[1]))

    def _generate_series_list(self):
        # Three test series: two full ramps and one mostly-None series.
        seriesList = []
        config = [range(101), range(101), [1, None, None, None, None]]

        for i, c in enumerate(config):
            name = "collectd.test-db{0}.load.value".format(i + 1)
            seriesList.append(TimeSeries(name, 0, 1, 1, c))

        return seriesList

    def test_remove_above_percentile(self):
        seriesList = self._generate_series_list()
        percent = 50
        results = functions.removeAbovePercentile({}, seriesList, percent)
        for result in results:
            self.assertListEqual(return_greater(result, percent), [])

    def test_remove_below_percentile(self):
        seriesList = self._generate_series_list()
        percent = 50
        results = functions.removeBelowPercentile({}, seriesList, percent)
        expected = [[], [], [1]]

        for i, result in enumerate(results):
            self.assertListEqual(return_less(result, percent), expected[i])

    def test_remove_above_value(self):
        seriesList = self._generate_series_list()
        value = 5
        results = functions.removeAboveValue({}, seriesList, value)
        for result in results:
            self.assertListEqual(return_greater(result, value), [])

    def test_remove_below_value(self):
        seriesList = self._generate_series_list()
        value = 5
        results = functions.removeBelowValue({}, seriesList, value)
        for result in results:
            self.assertListEqual(return_less(result, value), [])

    def test_limit(self):
        seriesList = self._generate_series_list()
        limit = len(seriesList) - 1
        results = functions.limit({}, seriesList, limit)
        self.assertEqual(len(results), limit,
            "More than {0} results returned".format(limit),
        )

    def _verify_series_options(self, seriesList, name, value):
        """
        Verify a given option is set and True for each series in a
        series list
        """
        for series in seriesList:
            self.assertIn(name, series.options)
            if value is True:
                test_func = self.assertTrue
            else:
                test_func = self.assertEqual

            test_func(series.options.get(name), value)

    def test_second_y_axis(self):
        seriesList = self._generate_series_list()
        results = functions.secondYAxis({}, seriesList)
        self._verify_series_options(results, "secondYAxis", True)

    def test_draw_as_infinite(self):
        seriesList = self._generate_series_list()
        results = functions.drawAsInfinite({}, seriesList)
        self._verify_series_options(results, "drawAsInfinite", True)

    def test_line_width(self):
        seriesList = self._generate_series_list()
        width = 10
        results = functions.lineWidth({}, seriesList, width)
        self._verify_series_options(results, "lineWidth", width)

    def test_transform_null(self):
        seriesList = self._generate_series_list()
        transform = -5
        results = functions.transformNull({}, copy.deepcopy(seriesList), transform)

        for counter, series in enumerate(seriesList):
            if not None in series:
                continue
            # If the None values weren't transformed, there is a problem
            self.assertNotIn(None, results[counter],
                "tranformNull should remove all None values",
            )
            # Anywhere a None was in the original series, verify it
            # was transformed to the given value it should be.
            for i, value in enumerate(series):
                if value is None:
                    result_val = results[counter][i]
                    self.assertEqual(transform, result_val,
                        "Transformed value should be {0}, not {1}".format(transform, result_val),
                    )

    def test_alias(self):
        seriesList = self._generate_series_list()
        substitution = "Ni!"
        results = functions.alias({}, seriesList, substitution)
        for series in results:
            self.assertEqual(series.name, substitution)

    def test_alias_sub(self):
        seriesList = self._generate_series_list()
        substitution = "Shrubbery"
        # FIX: raw string for the regex so "\w" is not an invalid
        # string escape.
        results = functions.aliasSub({}, seriesList, r"^\w+", substitution)
        for series in results:
            self.assertTrue(series.name.startswith(substitution),
                "aliasSub should replace the name with {0}".format(substitution),
            )

    # TODO: Add tests for * globbing and {} matching to this
    def test_alias_by_node(self):
        seriesList = self._generate_series_list()

        def verify_node_name(*nodes):
            # NOTE: *nodes is always a tuple here, so the previous dead
            # "if isinstance(nodes, int)" normalization was removed.
            # Use deepcopy so the original seriesList is unmodified
            results = functions.aliasByNode({}, copy.deepcopy(seriesList), *nodes)

            for i, series in enumerate(results):
                fragments = seriesList[i].name.split('.')
                # Super simplistic. Doesn't match {thing1,thing2}
                # or glob with *, both of what graphite allow you to use
                expected_name = '.'.join([fragments[i] for i in nodes])
                self.assertEqual(series.name, expected_name)

        verify_node_name(1)
        verify_node_name(1, 0)
        verify_node_name(-1, 0)

        # Verify broken input causes broken output
        with self.assertRaises(IndexError):
            verify_node_name(10000)

    def test_alpha(self):
        seriesList = self._generate_series_list()
        alpha = 0.5
        results = functions.alpha({}, seriesList, alpha)
        self._verify_series_options(results, "alpha", alpha)

    def test_color(self):
        seriesList = self._generate_series_list()
        color = "red"
        # Leave the original seriesList unmodified
        results = functions.color({}, copy.deepcopy(seriesList), color)

        for i, series in enumerate(results):
            self.assertTrue(hasattr(series, "color"),
                "The transformed seriesList is missing the 'color' attribute",
            )
            self.assertFalse(hasattr(seriesList[i], "color"),
                "The original seriesList shouldn't have a 'color' attribute",
            )
            self.assertEqual(series.color, color)

    def test_scale(self):
        seriesList = self._generate_series_list()
        multiplier = 2
        # Leave the original seriesList undisturbed for verification
        results = functions.scale({}, copy.deepcopy(seriesList), multiplier)
        for i, series in enumerate(results):
            for counter, value in enumerate(series):
                if value is None:
                    continue
                original_value = seriesList[i][counter]
                expected_value = original_value * multiplier
                self.assertEqual(value, expected_value)

    def _generate_mr_series(self):
        # Two "servers" with two metrics each, for map/reduce tests.
        seriesList = [
            TimeSeries('group.server1.metric1',0,1,1,[None]),
            TimeSeries('group.server1.metric2',0,1,1,[None]),
            TimeSeries('group.server2.metric1',0,1,1,[None]),
            TimeSeries('group.server2.metric2',0,1,1,[None]),
        ]
        mappedResult = [
            [seriesList[0],seriesList[1]],
            [seriesList[2],seriesList[3]]
        ]
        return (seriesList,mappedResult)

    def test_mapSeries(self):
        seriesList, expectedResult = self._generate_mr_series()
        results = functions.mapSeries({}, copy.deepcopy(seriesList), 1)
        self.assertEqual(results,expectedResult)

    def test_reduceSeries(self):
        sl, inputList = self._generate_mr_series()
        expectedResult   = [
            TimeSeries('group.server1.reduce.mock',0,1,1,[None]),
            TimeSeries('group.server2.reduce.mock',0,1,1,[None])
        ]
        resultSeriesList = [TimeSeries('mock(series)',0,1,1,[None])]
        mock = MagicMock(return_value = resultSeriesList)
        with patch.dict(functions.SeriesFunctions,{ 'mock': mock }):
            results = functions.reduceSeries({}, copy.deepcopy(inputList), "mock", 2, "metric1","metric2" )
            self.assertEqual(results,expectedResult)
        self.assertEqual(mock.mock_calls, [call({},inputList[0]), call({},inputList[1])])

    def test_pow(self):
        seriesList = self._generate_series_list()
        factor = 2
        # Leave the original seriesList undisturbed for verification
        results = functions.pow({}, copy.deepcopy(seriesList), factor)
        for i, series in enumerate(results):
            for counter, value in enumerate(series):
                if value is None:
                    continue
                original_value = seriesList[i][counter]
                expected_value = math.pow(original_value, factor)
                self.assertEqual(value, expected_value)

    def test_squareRoot(self):
        seriesList = self._generate_series_list()
        # Leave the original seriesList undisturbed for verification
        results = functions.squareRoot({}, copy.deepcopy(seriesList))
        for i, series in enumerate(results):
            for counter, value in enumerate(series):
                original_value = seriesList[i][counter]
                if value is None:
                    self.assertEqual(original_value, None)
                    continue
                expected_value = math.pow(original_value, 0.5)
                self.assertEqual(value, expected_value)

    def test_invert(self):
        seriesList = self._generate_series_list()
        # Leave the original seriesList undisturbed for verification
        results = functions.invert({}, copy.deepcopy(seriesList))
        for i, series in enumerate(results):
            for counter, value in enumerate(series):
                original_value = seriesList[i][counter]
                if value is None:
                    continue
                expected_value = math.pow(original_value, -1)
                self.assertEqual(value, expected_value)
| |
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
TestGyp.py: a testing framework for GYP integration tests.
"""
import os
import re
import shutil
import stat
import subprocess
import sys
import tempfile
import TestCommon
from TestCommon import __all__
__all__.extend([
'TestGyp',
])
def remove_debug_line_numbers(contents):
    """Strip the line-number column from gyp's debug output.

    gyp debug lines have the shape ``mode:filename:line_number:message``.
    The line numbers change with every edit to gyp itself, which made the
    stdout-comparison tests extremely fragile, so drop that third column
    before comparing.
    """
    lines = contents.splitlines()
    # Split each line into at most four columns on ":".
    lines = [l.split(":", 3) for l in lines]
    # Re-join each line while dropping the 3rd column (the line number).
    # FIX: the previous code joined only l[3:], discarding the mode and
    # filename columns too, and returned the raw *list* for short lines,
    # which made the final "\n".join raise TypeError. Short lines are now
    # passed through unchanged.
    lines = [":".join(l[:2] + l[3:]) if len(l) > 3 else ":".join(l)
             for l in lines]
    return "\n".join(lines)
def match_modulo_line_numbers(contents_a, contents_b):
    """File contents matcher that ignores line numbers."""
    normalized = [remove_debug_line_numbers(c)
                  for c in (contents_a, contents_b)]
    return TestCommon.match_exact(*normalized)
class TestGypBase(TestCommon.TestCommon):
    """
    Class for controlling end-to-end tests of gyp generators.

    Instantiating this class will create a temporary directory and
    arrange for its destruction (via the TestCmd superclass) and
    copy all of the non-gyptest files in the directory hierarchy of the
    executing script.

    The default behavior is to test the 'gyp' or 'gyp.bat' file in the
    current directory.  An alternative may be specified explicitly on
    instantiation, or by setting the TESTGYP_GYP environment variable.

    This class should be subclassed for each supported gyp generator
    (format).  Various abstract methods below define calling signatures
    used by the test scripts to invoke builds on the generated build
    configuration and to run executables generated by those builds.
    """

    # Name or path of the build tool; resolved from build_tool_list by
    # initialize_build_tool(). Subclasses populate build_tool_list.
    build_tool = None
    build_tool_list = []

    # Platform-specific file-name prefixes/suffixes taken from TestCommon
    # (e.g. '.exe', '.obj', 'lib', '.dll').
    _exe = TestCommon.exe_suffix
    _obj = TestCommon.obj_suffix
    shobj_ = TestCommon.shobj_prefix
    _shobj = TestCommon.shobj_suffix
    lib_ = TestCommon.lib_prefix
    _lib = TestCommon.lib_suffix
    dll_ = TestCommon.dll_prefix
    _dll = TestCommon.dll_suffix

    # Constants to represent different targets.
    ALL = '__all__'
    DEFAULT = '__default__'

    # Constants for different target types.
    EXECUTABLE = '__executable__'
    STATIC_LIB = '__static_lib__'
    SHARED_LIB = '__shared_lib__'
def __init__(self, gyp=None, *args, **kw):
    """Set up a temporary work area and locate the gyp under test.

    The gyp executable is taken from the `gyp` argument, then the
    TESTGYP_GYP environment variable, then a platform-appropriate
    default ('gyp.bat' on Windows, 'gyp' elsewhere).
    """
    self.origin_cwd = os.path.abspath(os.path.dirname(sys.argv[0]))
    # Any extra command-line arguments are forwarded to every gyp run.
    self.extra_args = sys.argv[1:]
    if not gyp:
        gyp = os.environ.get('TESTGYP_GYP')
        if not gyp:
            if sys.platform == 'win32':
                gyp = 'gyp.bat'
            else:
                gyp = 'gyp'
    self.gyp = os.path.abspath(gyp)
    self.initialize_build_tool()
    kw.setdefault('match', TestCommon.match_exact)
    # Put test output in out/testworkarea by default.
    # Use temporary names so there are no collisions.
    workdir = os.path.join('out', kw.get('workdir', 'testworkarea'))
    # Create work area if it doesn't already exist.
    if not os.path.isdir(workdir):
        os.makedirs(workdir)
    kw['workdir'] = tempfile.mktemp(prefix='testgyp.', dir=workdir)
    formats = kw.pop('formats', [])
    super(TestGypBase, self).__init__(*args, **kw)
    # Skip the test if the current generator format is excluded ('!fmt')
    # or not among the explicitly included formats.
    excluded_formats = set([f for f in formats if f[0] == '!'])
    included_formats = set(formats) - excluded_formats
    if ('!'+self.format in excluded_formats or
        included_formats and self.format not in included_formats):
        msg = 'Invalid test for %r format; skipping test.\n'
        self.skip_test(msg % self.format)
    self.copy_test_configuration(self.origin_cwd, self.workdir)
    self.set_configuration(None)
    # Set $HOME so that gyp doesn't read the user's actual
    # ~/.gyp/include.gypi file, which may contain variables
    # and other settings that would change the output.
    os.environ['HOME'] = self.workpath()
    # Clear $GYP_DEFINES for the same reason.
    if 'GYP_DEFINES' in os.environ:
        del os.environ['GYP_DEFINES']
def built_file_must_exist(self, name, type=None, **kw):
    """
    Fails the test if the specified built file name does not exist.
    """
    path = self.built_file_path(name, type, **kw)
    return self.must_exist(path)
def built_file_must_not_exist(self, name, type=None, **kw):
    """
    Fails the test if the specified built file name exists.
    """
    path = self.built_file_path(name, type, **kw)
    return self.must_not_exist(path)
def built_file_must_match(self, name, contents, **kw):
    """
    Fails the test if the contents of the specified built file name
    do not match the specified contents.
    """
    path = self.built_file_path(name, **kw)
    return self.must_match(path, contents)
def built_file_must_not_match(self, name, contents, **kw):
    """
    Fails the test if the contents of the specified built file name
    match the specified contents.
    """
    path = self.built_file_path(name, **kw)
    return self.must_not_match(path, contents)
def copy_test_configuration(self, source_dir, dest_dir):
    """
    Copies the test configuration from the specified source_dir
    (the directory in which the test script lives) to the
    specified dest_dir (a temporary working directory).

    This ignores all files and directories that begin with
    the string 'gyptest', and all '.svn' subdirectories.
    """
    for root, dirs, files in os.walk(source_dir):
        if '.svn' in dirs:
            dirs.remove('.svn')
        # FIX: prune in place (dirs[:] =) so os.walk does not descend
        # into ignored directories; rebinding `dirs` only filtered the
        # local list and gyptest-* directories were still traversed.
        dirs[:] = [d for d in dirs if not d.startswith('gyptest')]
        files = [f for f in files if not f.startswith('gyptest')]
        for dirname in dirs:
            source = os.path.join(root, dirname)
            destination = source.replace(source_dir, dest_dir)
            os.mkdir(destination)
            if sys.platform != 'win32':
                # Preserve permission/time metadata on the new directory.
                shutil.copystat(source, destination)
        for filename in files:
            source = os.path.join(root, filename)
            destination = source.replace(source_dir, dest_dir)
            shutil.copy2(source, destination)
def initialize_build_tool(self):
    """
    Initializes the .build_tool attribute.

    Searches the .build_tool_list for an executable name on the user's
    $PATH.  The first tool on the list is used as-is if nothing is found
    on the current $PATH.
    """
    for candidate in self.build_tool_list:
        if not candidate:
            continue
        # Absolute paths are trusted without a $PATH search.
        if os.path.isabs(candidate):
            self.build_tool = candidate
            return
        resolved = self.where_is(candidate)
        if resolved:
            self.build_tool = resolved
            return
    # Nothing resolved: fall back to the first entry verbatim.
    if self.build_tool_list:
        self.build_tool = self.build_tool_list[0]
def relocate(self, source, destination):
    """
    Renames (relocates) the specified source (usually a directory)
    to the specified destination, creating the destination directory
    first if necessary.

    Note:  Don't use this as a generic "rename" operation.  In the
    future, "relocating" parts of a GYP tree may affect the state of
    the test to modify the behavior of later method calls.
    """
    parent = os.path.dirname(destination)
    if not os.path.exists(parent):
        self.subdir(parent)
    os.rename(source, destination)
def report_not_up_to_date(self):
  """
  Reports that a build is not up-to-date.

  This provides common reporting for formats that have complicated
  conditions for checking whether a build is up-to-date.  Formats
  that expect exact output from the command (make, scons) can
  just set stdout= when they call the run_build() method.
  """
  print "Build is not up-to-date:"
  print self.banner('STDOUT ')
  print self.stdout()
  stderr = self.stderr()
  if stderr:
    # Only emit the STDERR section when there is something to show.
    print self.banner('STDERR ')
    print stderr
def run_gyp(self, gyp_file, *args, **kw):
  """
  Runs gyp against the specified gyp_file with the specified args.
  """
  # An ignore_line_numbers=True keyword installs a comparator that
  # ignores the line numbers gyp logs in its debug output.
  if kw.pop('ignore_line_numbers', False):
    kw.setdefault('match', match_modulo_line_numbers)

  # TODO:  --depth=. works around Chromium-specific tree climbing.
  depth = kw.pop('depth', '.')
  arguments = ['--depth=' + depth, '--format=' + self.format, gyp_file]
  arguments += self.extra_args
  arguments += list(args)

  return self.run(program=self.gyp, arguments=arguments, **kw)
def run(self, *args, **kw):
  """
  Executes a program by calling the superclass .run() method.

  This exists to provide a common place to filter out keyword
  arguments implemented in this layer, without having to update
  the tool-specific subclasses or clutter the tests themselves
  with platform-specific code.
  """
  # 'SYMROOT' is only consumed by the Xcode generator's build(); strip it
  # before delegating so other tools never see it.  dict.pop() with a
  # default replaces the deprecated dict.has_key() (removed in Python 3)
  # and does the test-and-delete in a single step.
  kw.pop('SYMROOT', None)
  super(TestGypBase, self).run(*args, **kw)
def set_configuration(self, configuration):
  """
  Sets the configuration, to be used for invoking the build
  tool and testing potential built output.

  A falsy value makes configuration_dirname() and
  configuration_buildname() fall back to 'Default'.
  """
  self.configuration = configuration
def configuration_dirname(self):
  """Return the directory-name part of the configuration.

  A 'Name|Platform'-style configuration maps to its first component;
  an unset configuration maps to 'Default'.
  """
  if not self.configuration:
    return 'Default'
  return self.configuration.split('|')[0]
def configuration_buildname(self):
  """Return the full configuration name passed to the build tool,
  or 'Default' when no configuration has been set."""
  return self.configuration or 'Default'
#
# Abstract methods to be defined by format-specific subclasses.
#
def build(self, gyp_file, target=None, **kw):
  """
  Runs a build of the specified target against the configuration
  generated from the specified gyp_file.

  A 'target' argument of None or the special value TestGyp.DEFAULT
  specifies the default argument for the underlying build tool.
  A 'target' argument of TestGyp.ALL specifies the 'all' target
  (if any) of the underlying build tool.
  """
  # Abstract: each format-specific subclass must override this.
  raise NotImplementedError
def built_file_path(self, name, type=None, **kw):
  """
  Returns a path to the specified file name, of the specified type.
  """
  # Abstract: each format-specific subclass must override this.
  raise NotImplementedError
def built_file_basename(self, name, type=None, **kw):
  """
  Returns the base name of the specified file name, of the specified type.

  A bare=True keyword argument specifies that prefixes and suffixes shouldn't
  be applied.
  """
  if kw.get('bare'):
    # Caller asked for the undecorated name.
    return name
  # Guard-clause form of the original if/elif decoration chain; the
  # type comparisons are evaluated in the same order, lazily.
  if type == self.EXECUTABLE:
    return name + self._exe
  if type == self.STATIC_LIB:
    return self.lib_ + name + self._lib
  if type == self.SHARED_LIB:
    return self.dll_ + name + self._dll
  return name
def run_built_executable(self, name, *args, **kw):
  """
  Runs an executable program built from a gyp-generated configuration.

  The specified name should be independent of any particular generator.
  Subclasses should find the output executable in the appropriate
  output build directory, tack on any necessary executable suffix, etc.
  """
  # Abstract: each format-specific subclass must override this.
  raise NotImplementedError
def up_to_date(self, gyp_file, target=None, **kw):
  """
  Verifies that a build of the specified target is up to date.

  The subclass should implement this by calling build()
  (or a reasonable equivalent), checking whatever conditions
  will tell it the build was an "up to date" null build, and
  failing if it isn't.
  """
  # Abstract: each format-specific subclass must override this.
  raise NotImplementedError
class TestGypGypd(TestGypBase):
  """
  Subclass for testing the GYP 'gypd' generator (spit out the
  internal data structure as pretty-printed Python).
  """
  # No build tool is involved for this format; the base-class abstract
  # build methods are left unimplemented.
  format = 'gypd'
class TestGypCustom(TestGypBase):
  """
  Subclass for testing the GYP with custom generator
  """
  def __init__(self, gyp=None, *args, **kw):
    # The generator name is supplied by the caller via the required
    # 'format' keyword argument.
    self.format = kw.pop("format")
    # NOTE(review): the 'gyp' argument is accepted but not forwarded to
    # the superclass -- verify this is intentional.
    super(TestGypCustom, self).__init__(*args, **kw)
class TestGypAndroid(TestGypBase):
  """
  Subclass for testing the GYP Android makefile generator. Note that
  build/envsetup.sh and lunch must have been run before running tests.

  TODO: This is currently an incomplete implementation. We do not support
  run_built_executable(), so we pass only tests which do not use this. As a
  result, support for host targets is not properly tested.
  """
  format = 'android'

  # Note that we can't use mmm as the build tool because ...
  # - it builds all targets, whereas we need to pass a target
  # - it is a function, whereas the test runner assumes the build tool is a file
  # Instead we use make and duplicate the logic from mmm.
  build_tool_list = ['make']

  # We use our custom target 'gyp_all_modules', as opposed to the 'all_modules'
  # target used by mmm, to build only those targets which are part of the gyp
  # target 'all'.
  ALL = 'gyp_all_modules'

  def __init__(self, gyp=None, *args, **kw):
    """Set up an out-of-tree workdir and clear stale gyp build output."""
    # Android requires build and test output to be outside its source tree.
    # We use the following working directory for the test's source, but the
    # test's build output still goes to $ANDROID_PRODUCT_OUT.
    # Note that some tests explicitly set format='gypd' to invoke the gypd
    # backend. This writes to the source tree, but there's no way around this.
    kw['workdir'] = os.path.join('/tmp', 'gyptest',
                                 kw.get('workdir', 'testworkarea'))

    # We need to remove all gyp outputs from out/. Ths is because some tests
    # don't have rules to regenerate output, so they will simply re-use stale
    # output if present. Since the test working directory gets regenerated for
    # each test run, this can confuse things.
    # We don't have a list of build outputs because we don't know which
    # dependent targets were built. Instead we delete all gyp-generated output.
    # This may be excessive, but should be safe.
    out_dir = os.environ['ANDROID_PRODUCT_OUT']
    obj_dir = os.path.join(out_dir, 'obj')
    shutil.rmtree(os.path.join(obj_dir, 'GYP'), ignore_errors = True)
    shutil.rmtree(os.path.join(obj_dir, 'NONE'), ignore_errors = True)
    for x in ['EXECUTABLES', 'STATIC_LIBRARIES', 'SHARED_LIBRARIES']:
      for d in os.listdir(os.path.join(obj_dir, x)):
        if d.endswith('_gyp_intermediates'):
          shutil.rmtree(os.path.join(obj_dir, x, d), ignore_errors = True)
    for x in [os.path.join('obj', 'lib'), os.path.join('system', 'lib')]:
      for d in os.listdir(os.path.join(out_dir, x)):
        if d.endswith('_gyp.so'):
          os.remove(os.path.join(out_dir, x, d))
    # NOTE(review): the 'gyp' argument is accepted but not forwarded to
    # the superclass -- verify this is intentional.
    super(TestGypAndroid, self).__init__(*args, **kw)

  def target_name(self, target):
    """Map generic target values onto Android's special targets."""
    if target == self.ALL:
      return self.ALL
    # The default target is 'droid'. However, we want to use our special target
    # to build only the gyp target 'all'.
    if target in (None, self.DEFAULT):
      return self.ALL
    return target

  def build(self, gyp_file, target=None, **kw):
    """
    Runs a build using the Android makefiles generated from the specified
    gyp_file. This logic is taken from Android's mmm.
    """
    arguments = kw.get('arguments', [])[:]
    arguments.append(self.target_name(target))
    arguments.append('-C')
    arguments.append(os.environ['ANDROID_BUILD_TOP'])
    kw['arguments'] = arguments
    chdir = kw.get('chdir', '')
    makefile = os.path.join(self.workdir, chdir, 'GypAndroid.mk')
    # ONE_SHOT_MAKEFILE scopes the build to this test's generated makefile;
    # it is removed again after the run so later builds are unaffected.
    os.environ['ONE_SHOT_MAKEFILE'] = makefile
    result = self.run(program=self.build_tool, **kw)
    del os.environ['ONE_SHOT_MAKEFILE']
    return result

  def android_module(self, group, name, subdir):
    """Return the Android module name gyp generates for a target."""
    if subdir:
      name = '%s_%s' % (subdir, name)
    if group == 'SHARED_LIBRARIES':
      name = 'lib_%s' % name
    return '%s_gyp' % name

  def intermediates_dir(self, group, module_name):
    """Return the module's intermediates directory under the out tree."""
    return os.path.join(os.environ['ANDROID_PRODUCT_OUT'], 'obj', group,
                        '%s_intermediates' % module_name)

  def built_file_path(self, name, type=None, **kw):
    """
    Returns a path to the specified file name, of the specified type,
    as built by Android. Note that we don't support the configuration
    parameter.
    """
    # Built files are in $ANDROID_PRODUCT_OUT. This requires copying logic from
    # the Android build system.
    if type == None:
      return os.path.join(os.environ['ANDROID_PRODUCT_OUT'], 'obj', 'GYP',
                          'shared_intermediates', name)
    subdir = kw.get('subdir')
    if type == self.EXECUTABLE:
      # We don't install executables
      group = 'EXECUTABLES'
      module_name = self.android_module(group, name, subdir)
      return os.path.join(self.intermediates_dir(group, module_name), name)
    if type == self.STATIC_LIB:
      group = 'STATIC_LIBRARIES'
      module_name = self.android_module(group, name, subdir)
      return os.path.join(self.intermediates_dir(group, module_name),
                          '%s.a' % module_name)
    if type == self.SHARED_LIB:
      group = 'SHARED_LIBRARIES'
      module_name = self.android_module(group, name, subdir)
      return os.path.join(self.intermediates_dir(group, module_name), 'LINKED',
                          '%s.so' % module_name)
    assert False, 'Unhandled type'

  def run_built_executable(self, name, *args, **kw):
    """
    Runs an executable program built from a gyp-generated configuration.

    This is not correctly implemented for Android. For now, we simply check
    that the executable file exists.
    """
    # Running executables requires a device. Even if we build for target x86,
    # the binary is not built with the correct toolchain options to actually
    # run on the host.

    # Copied from TestCommon.run()
    match = kw.pop('match', self.match)
    status = None
    if os.path.exists(self.built_file_path(name)):
      status = 1
    self._complete(None, None, None, None, status, self.match)

  def match_single_line(self, lines = None, expected_line = None):
    """
    Checks that specified line appears in the text.

    Returns 1 when found; implicitly returns None otherwise (Python 2
    truthy-match convention used by the test runner).
    """
    for line in lines.split('\n'):
      if line == expected_line:
        return 1
    return

  def up_to_date(self, gyp_file, target=None, **kw):
    """
    Verifies that a build of the specified target is up to date.
    """
    kw['stdout'] = ("make: Nothing to be done for `%s'." %
                    self.target_name(target))

    # We need to supply a custom matcher, since we don't want to depend on the
    # exact stdout string.
    kw['match'] = self.match_single_line
    return self.build(gyp_file, target, **kw)
class TestGypMake(TestGypBase):
  """
  Subclass for testing the GYP Make generator.
  """
  format = 'make'
  build_tool_list = ['make']
  ALL = 'all'

  def build(self, gyp_file, target=None, **kw):
    """
    Runs a Make build using the Makefiles generated from the specified
    gyp_file.
    """
    arguments = kw.get('arguments', [])[:]
    if self.configuration:
      arguments.append('BUILDTYPE=' + self.configuration)
    if target not in (None, self.DEFAULT):
      arguments.append(target)
    # Sub-directory builds provide per-gyp Makefiles (i.e.
    # Makefile.gyp_filename), so use that if there is no Makefile.
    chdir = kw.get('chdir', '')
    if not os.path.exists(os.path.join(chdir, 'Makefile')):
      print "NO Makefile in " + os.path.join(chdir, 'Makefile')
      arguments.insert(0, '-f')
      arguments.insert(1, os.path.splitext(gyp_file)[0] + '.Makefile')
    kw['arguments'] = arguments
    return self.run(program=self.build_tool, **kw)

  def up_to_date(self, gyp_file, target=None, **kw):
    """
    Verifies that a build of the specified Make target is up to date.
    """
    if target in (None, self.DEFAULT):
      message_target = 'all'
    else:
      message_target = target
    # Make prints this exact line for a null build; the base runner
    # compares stdout against it.
    kw['stdout'] = "make: Nothing to be done for `%s'.\n" % message_target
    return self.build(gyp_file, target, **kw)

  def run_built_executable(self, name, *args, **kw):
    """
    Runs an executable built by Make.

    Sets the dynamic-linker search path so freshly built shared
    libraries are found.
    """
    configuration = self.configuration_dirname()
    libdir = os.path.join('out', configuration, 'lib')
    # TODO(piman): when everything is cross-compile safe, remove lib.target
    if sys.platform == 'darwin':
      # Mac puts target shared libraries right in the product directory.
      # NOTE(review): 'configuration' is recomputed here redundantly.
      configuration = self.configuration_dirname()
      os.environ['DYLD_LIBRARY_PATH'] = (
          libdir + '.host:' + os.path.join('out', configuration))
    else:
      os.environ['LD_LIBRARY_PATH'] = libdir + '.host:' + libdir + '.target'
    # Enclosing the name in a list avoids prepending the original dir.
    program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
    return self.run(program=program, *args, **kw)

  def built_file_path(self, name, type=None, **kw):
    """
    Returns a path to the specified file name, of the specified type,
    as built by Make.

    Built files are in the subdirectory 'out/{configuration}'.
    The default is 'out/Default'.

    A chdir= keyword argument specifies the source directory
    relative to which the output subdirectory can be found.

    "type" values of STATIC_LIB or SHARED_LIB append the necessary
    prefixes and suffixes to a platform-independent library base name.

    A subdir= keyword argument specifies a library subdirectory within
    the default 'obj.target'.
    """
    result = []
    chdir = kw.get('chdir')
    if chdir:
      result.append(chdir)
    configuration = self.configuration_dirname()
    result.extend(['out', configuration])
    if type == self.STATIC_LIB and sys.platform != 'darwin':
      result.append('obj.target')
    elif type == self.SHARED_LIB and sys.platform != 'darwin':
      result.append('lib.target')
    subdir = kw.get('subdir')
    if subdir and type != self.SHARED_LIB:
      result.append(subdir)
    result.append(self.built_file_basename(name, type, **kw))
    return self.workpath(*result)
def FindVisualStudioInstallation():
  """Returns appropriate values for .build_tool and .uses_msbuild fields
  of TestGypBase for Visual Studio.

  We use the value specified by GYP_MSVS_VERSION.  If not specified, we
  search %PATH% and %PATHEXT% for a devenv.{exe,bat,...} executable.
  Failing that, we search for likely deployment paths.
  """
  possible_roots = ['C:\\Program Files (x86)', 'C:\\Program Files',
                    'E:\\Program Files (x86)', 'E:\\Program Files']
  possible_paths = {
      '2010': r'Microsoft Visual Studio 10.0\Common7\IDE\devenv.com',
      '2008': r'Microsoft Visual Studio 9.0\Common7\IDE\devenv.com',
      '2005': r'Microsoft Visual Studio 8\Common7\IDE\devenv.com'}

  # Precedence: msvs_version= command-line flag, then GYP_MSVS_VERSION
  # environment variable, then 'auto' detection below.
  msvs_version = 'auto'
  for flag in (f for f in sys.argv if f.startswith('msvs_version=')):
    msvs_version = flag.split('=')[-1]
  msvs_version = os.environ.get('GYP_MSVS_VERSION', msvs_version)

  build_tool = None
  if msvs_version in possible_paths:
    # Check that the path to the specified GYP_MSVS_VERSION exists.
    path = possible_paths[msvs_version]
    for r in possible_roots:
      bt = os.path.join(r, path)
      if os.path.exists(bt):
        build_tool = bt
        uses_msbuild = msvs_version >= '2010'
        return build_tool, uses_msbuild
    else:
      print ('Warning: Environment variable GYP_MSVS_VERSION specifies "%s" '
              'but corresponding "%s" was not found.' % (msvs_version, path))
  if build_tool:
    # We found 'devenv' on the path, use that and try to guess the version.
    for version, path in possible_paths.iteritems():
      if build_tool.find(path) >= 0:
        uses_msbuild = version >= '2010'
        return build_tool, uses_msbuild
    else:
      # If not, assume not MSBuild.
      uses_msbuild = False
    return build_tool, uses_msbuild
  # Neither GYP_MSVS_VERSION nor the path help us out.  Iterate through
  # the choices looking for a match.
  for version in sorted(possible_paths, reverse=True):
    path = possible_paths[version]
    for r in possible_roots:
      bt = os.path.join(r, path)
      if os.path.exists(bt):
        build_tool = bt
        uses_msbuild = msvs_version >= '2010'
        return build_tool, uses_msbuild
  print 'Error: could not find devenv'
  sys.exit(1)
class TestGypOnMSToolchain(TestGypBase):
  """
  Common subclass for testing generators that target the Microsoft Visual
  Studio toolchain (cl, link, dumpbin, etc.)
  """
  @staticmethod
  def _ComputeVsvarsPath(devenv_path):
    """Return the path to vsvars32.bat for the given devenv executable.

    devenv lives in <VS root>\\Common7\\IDE\\, and vsvars32.bat lives in
    <VS root>\\Common7\\Tools\\, so two '..' components relative to the
    executable path itself reach the right directory.  The result is left
    unnormalized; the shell resolves the '..' components.

    Fix: the previous version also computed os.path.split(devenv_path)[0]
    into an unused local; that dead assignment has been removed.
    """
    return os.path.join(devenv_path, '../../Tools/vsvars32.bat')

  def initialize_build_tool(self):
    """Locate devenv and the matching vsvars32.bat on Windows hosts."""
    super(TestGypOnMSToolchain, self).initialize_build_tool()
    if sys.platform in ('win32', 'cygwin'):
      self.devenv_path, self.uses_msbuild = FindVisualStudioInstallation()
      self.vsvars_path = TestGypOnMSToolchain._ComputeVsvarsPath(
          self.devenv_path)

  def run_dumpbin(self, *dumpbin_args):
    """Run the dumpbin tool with the specified arguments, and capturing and
    returning stdout."""
    assert sys.platform in ('win32', 'cygwin')
    # Run through the shell so vsvars32.bat can set up the environment
    # before dumpbin executes.
    cmd = os.environ.get('COMSPEC', 'cmd.exe')
    arguments = [cmd, '/c', self.vsvars_path, '&&', 'dumpbin']
    arguments.extend(dumpbin_args)
    proc = subprocess.Popen(arguments, stdout=subprocess.PIPE)
    output = proc.communicate()[0]
    assert not proc.returncode
    return output
class TestGypNinja(TestGypOnMSToolchain):
  """
  Subclass for testing the GYP Ninja generator.
  """
  format = 'ninja'
  build_tool_list = ['ninja']
  ALL = 'all'
  DEFAULT = 'all'

  def run_gyp(self, gyp_file, *args, **kw):
    # Delegates straight to the base implementation; kept as an explicit
    # override point.
    TestGypBase.run_gyp(self, gyp_file, *args, **kw)

  def build(self, gyp_file, target=None, **kw):
    """Run a ninja build in out/<configuration>."""
    arguments = kw.get('arguments', [])[:]

    # Add a -C output/path to the command line.
    arguments.append('-C')
    arguments.append(os.path.join('out', self.configuration_dirname()))

    if target is None:
      target = 'all'
    arguments.append(target)
    kw['arguments'] = arguments
    return self.run(program=self.build_tool, **kw)

  def run_built_executable(self, name, *args, **kw):
    """Run an executable from the ninja output directory."""
    # Enclosing the name in a list avoids prepending the original dir.
    program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
    if sys.platform == 'darwin':
      configuration = self.configuration_dirname()
      os.environ['DYLD_LIBRARY_PATH'] = os.path.join('out', configuration)
    return self.run(program=program, *args, **kw)

  def built_file_path(self, name, type=None, **kw):
    """Return the path of a built file under out/<configuration>."""
    result = []
    chdir = kw.get('chdir')
    if chdir:
      result.append(chdir)
    result.append('out')
    result.append(self.configuration_dirname())
    if type == self.STATIC_LIB:
      if sys.platform != 'darwin':
        result.append('obj')
    elif type == self.SHARED_LIB:
      if sys.platform != 'darwin' and sys.platform != 'win32':
        result.append('lib')
    subdir = kw.get('subdir')
    if subdir and type != self.SHARED_LIB:
      result.append(subdir)
    result.append(self.built_file_basename(name, type, **kw))
    return self.workpath(*result)

  def up_to_date(self, gyp_file, target=None, **kw):
    """Build and require ninja to report that there was no work to do."""
    result = self.build(gyp_file, target, **kw)
    if not result:
      stdout = self.stdout()
      if 'ninja: no work to do' not in stdout:
        self.report_not_up_to_date()
        self.fail_test()
    return result
class TestGypMSVS(TestGypOnMSToolchain):
  """
  Subclass for testing the GYP Visual Studio generator.
  """
  format = 'msvs'

  # Matches devenv's build summary line; group(1) is the up-to-date count.
  u = r'=== Build: 0 succeeded, 0 failed, (\d+) up-to-date, 0 skipped ==='
  up_to_date_re = re.compile(u, re.M)

  # Initial None element will indicate to our .initialize_build_tool()
  # method below that 'devenv' was not found on %PATH%.
  #
  # Note:  we must use devenv.com to be able to capture build output.
  # Directly executing devenv.exe only sends output to BuildLog.htm.
  build_tool_list = [None, 'devenv.com']

  def initialize_build_tool(self):
    """Use the devenv path discovered by FindVisualStudioInstallation()."""
    super(TestGypMSVS, self).initialize_build_tool()
    self.build_tool = self.devenv_path

  def build(self, gyp_file, target=None, rebuild=False, **kw):
    """
    Runs a Visual Studio build using the configuration generated
    from the specified gyp_file.
    """
    configuration = self.configuration_buildname()
    if rebuild:
      build = '/Rebuild'
    else:
      build = '/Build'
    arguments = kw.get('arguments', [])[:]
    arguments.extend([gyp_file.replace('.gyp', '.sln'),
                      build, configuration])
    # Note:  the Visual Studio generator doesn't add an explicit 'all'
    # target, so we just treat it the same as the default.
    if target not in (None, self.ALL, self.DEFAULT):
      arguments.extend(['/Project', target])
    if self.configuration:
      arguments.extend(['/ProjectConfig', self.configuration])
    kw['arguments'] = arguments
    return self.run(program=self.build_tool, **kw)

  def up_to_date(self, gyp_file, target=None, **kw):
    """
    Verifies that a build of the specified Visual Studio target is up to date.

    Beware that VS2010 will behave strangely if you build under
    C:\USERS\yourname\AppData\Local.  It will cause needless work.  The ouptut
    will be "1 succeeded and 0 up to date".  MSBuild tracing reveals that:
    "Project 'C:\Users\...\AppData\Local\...vcxproj' not up to date because
    'C:\PROGRAM FILES (X86)\MICROSOFT VISUAL STUDIO 10.0\VC\BIN\1033\CLUI.DLL'
    was modified at 02/21/2011 17:03:30, which is newer than '' which was
    modified at 01/01/0001 00:00:00.

    The workaround is to specify a workdir when instantiating the test, e.g.
    test = TestGyp.TestGyp(workdir='workarea')
    """
    result = self.build(gyp_file, target, **kw)
    if not result:
      stdout = self.stdout()
      # Up to date if the summary reports at least one up-to-date project.
      m = self.up_to_date_re.search(stdout)
      up_to_date = m and int(m.group(1)) > 0
      if not up_to_date:
        self.report_not_up_to_date()
        self.fail_test()
    return result

  def run_built_executable(self, name, *args, **kw):
    """
    Runs an executable built by Visual Studio.
    """
    configuration = self.configuration_dirname()
    # Enclosing the name in a list avoids prepending the original dir.
    program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
    return self.run(program=program, *args, **kw)

  def built_file_path(self, name, type=None, **kw):
    """
    Returns a path to the specified file name, of the specified type,
    as built by Visual Studio.

    Built files are in a subdirectory that matches the configuration
    name.  The default is 'Default'.

    A chdir= keyword argument specifies the source directory
    relative to which the output subdirectory can be found.

    "type" values of STATIC_LIB or SHARED_LIB append the necessary
    prefixes and suffixes to a platform-independent library base name.
    """
    result = []
    chdir = kw.get('chdir')
    if chdir:
      result.append(chdir)
    result.append(self.configuration_dirname())
    if type == self.STATIC_LIB:
      result.append('lib')
    result.append(self.built_file_basename(name, type, **kw))
    return self.workpath(*result)
class TestGypSCons(TestGypBase):
  """
  Subclass for testing the GYP SCons generator.
  """
  format = 'scons'
  build_tool_list = ['scons', 'scons.py']
  ALL = 'all'

  def build(self, gyp_file, target=None, **kw):
    """
    Runs a scons build using the SCons configuration generated from the
    specified gyp_file.
    """
    arguments = kw.get('arguments', [])[:]
    dirname = os.path.dirname(gyp_file)
    if dirname:
      arguments.extend(['-C', dirname])
    if self.configuration:
      arguments.append('--mode=' + self.configuration)
    if target not in (None, self.DEFAULT):
      arguments.append(target)
    kw['arguments'] = arguments
    return self.run(program=self.build_tool, **kw)

  def up_to_date(self, gyp_file, target=None, **kw):
    """
    Verifies that a build of the specified SCons target is up to date.
    """
    if target in (None, self.DEFAULT):
      up_to_date_targets = 'all'
    else:
      up_to_date_targets = target
    # Expect one "is up to date" line per target; -Q suppresses SCons's
    # progress chatter so stdout can be compared exactly.
    up_to_date_lines = []
    for arg in up_to_date_targets.split():
      up_to_date_lines.append("scons: `%s' is up to date.\n" % arg)
    kw['stdout'] = ''.join(up_to_date_lines)
    arguments = kw.get('arguments', [])[:]
    arguments.append('-Q')
    kw['arguments'] = arguments
    return self.build(gyp_file, target, **kw)

  def run_built_executable(self, name, *args, **kw):
    """
    Runs an executable built by scons.
    """
    configuration = self.configuration_dirname()
    os.environ['LD_LIBRARY_PATH'] = os.path.join(configuration, 'lib')
    # Enclosing the name in a list avoids prepending the original dir.
    program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
    return self.run(program=program, *args, **kw)

  def built_file_path(self, name, type=None, **kw):
    """
    Returns a path to the specified file name, of the specified type,
    as built by Scons.

    Built files are in a subdirectory that matches the configuration
    name.  The default is 'Default'.

    A chdir= keyword argument specifies the source directory
    relative to which the output subdirectory can be found.

    "type" values of STATIC_LIB or SHARED_LIB append the necessary
    prefixes and suffixes to a platform-independent library base name.
    """
    result = []
    chdir = kw.get('chdir')
    if chdir:
      result.append(chdir)
    result.append(self.configuration_dirname())
    if type in (self.STATIC_LIB, self.SHARED_LIB):
      result.append('lib')
    result.append(self.built_file_basename(name, type, **kw))
    return self.workpath(*result)
class TestGypXcode(TestGypBase):
  """
  Subclass for testing the GYP Xcode generator.
  """
  format = 'xcode'
  build_tool_list = ['xcodebuild']

  # Matches the output block xcodebuild emits when a shell-script phase runs.
  phase_script_execution = ("\n"
                            "PhaseScriptExecution /\\S+/Script-[0-9A-F]+\\.sh\n"
                            "    cd /\\S+\n"
                            "    /bin/sh -c /\\S+/Script-[0-9A-F]+\\.sh\n"
                            "(make: Nothing to be done for `all'\\.\n)?")

  strip_up_to_date_expressions = [
      # Various actions or rules can run even when the overall build target
      # is up to date.  Strip those phases' GYP-generated output.
      re.compile(phase_script_execution, re.S),

      # The message from distcc_pump can trail the "BUILD SUCCEEDED"
      # message, so strip that, too.
      re.compile('__________Shutting down distcc-pump include server\n', re.S),
  ]

  up_to_date_endings = (
      'Checking Dependencies...\n** BUILD SUCCEEDED **\n',  # Xcode 3.0/3.1
      'Check dependencies\n** BUILD SUCCEEDED **\n\n',      # Xcode 3.2
  )

  def build(self, gyp_file, target=None, **kw):
    """
    Runs an xcodebuild using the .xcodeproj generated from the specified
    gyp_file.
    """
    # Be sure we're working with a copy of 'arguments' since we modify it.
    # The caller may not be expecting it to be modified.
    arguments = kw.get('arguments', [])[:]
    arguments.extend(['-project', gyp_file.replace('.gyp', '.xcodeproj')])
    if target == self.ALL:
      arguments.append('-alltargets',)
    elif target not in (None, self.DEFAULT):
      arguments.extend(['-target', target])
    if self.configuration:
      arguments.extend(['-configuration', self.configuration])
    symroot = kw.get('SYMROOT', '$SRCROOT/build')
    if symroot:
      arguments.append('SYMROOT='+symroot)
    kw['arguments'] = arguments
    return self.run(program=self.build_tool, **kw)

  def up_to_date(self, gyp_file, target=None, **kw):
    """
    Verifies that a build of the specified Xcode target is up to date.
    """
    result = self.build(gyp_file, target, **kw)
    if not result:
      output = self.stdout()
      # Remove incidental phase output before checking for the
      # version-specific "BUILD SUCCEEDED" endings.
      for expression in self.strip_up_to_date_expressions:
        output = expression.sub('', output)
      if not output.endswith(self.up_to_date_endings):
        self.report_not_up_to_date()
        self.fail_test()
    return result

  def run_built_executable(self, name, *args, **kw):
    """
    Runs an executable built by xcodebuild.
    """
    configuration = self.configuration_dirname()
    os.environ['DYLD_LIBRARY_PATH'] = os.path.join('build', configuration)
    # Enclosing the name in a list avoids prepending the original dir.
    program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
    return self.run(program=program, *args, **kw)

  def built_file_path(self, name, type=None, **kw):
    """
    Returns a path to the specified file name, of the specified type,
    as built by Xcode.

    Built files are in the subdirectory 'build/{configuration}'.
    The default is 'build/Default'.

    A chdir= keyword argument specifies the source directory
    relative to which the output subdirectory can be found.

    "type" values of STATIC_LIB or SHARED_LIB append the necessary
    prefixes and suffixes to a platform-independent library base name.
    """
    result = []
    chdir = kw.get('chdir')
    if chdir:
      result.append(chdir)
    configuration = self.configuration_dirname()
    result.extend(['build', configuration])
    result.append(self.built_file_basename(name, type, **kw))
    return self.workpath(*result)
# All known format test classes; TestGyp() below consults this list to map
# a requested format string onto its implementation class.
format_class_list = [
  TestGypGypd,
  TestGypAndroid,
  TestGypMake,
  TestGypMSVS,
  TestGypNinja,
  TestGypSCons,
  TestGypXcode,
]
def TestGyp(*args, **kw):
  """
  Returns an appropriate TestGyp* instance for a specified GYP format.

  The format is taken from the 'format' keyword argument, falling back
  to the TESTGYP_FORMAT environment variable.

  Raises:
    Exception: if no registered generator class matches the format.
  """
  format = kw.pop('format', os.environ.get('TESTGYP_FORMAT'))
  for format_class in format_class_list:
    if format == format_class.format:
      return format_class(*args, **kw)
  # Use the call form of raise (valid in both Python 2 and 3) instead of
  # the Python-2-only "raise Exception, msg" statement syntax.
  raise Exception("unknown format %r" % format)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
import webob
from webob import exc
import webtest
from neutron.api import api_common
from neutron.api.extensions import PluginAwareExtensionManager
from neutron.api.v2 import attributes
from neutron.api.v2 import base as v2_base
from neutron.api.v2 import router
from neutron.common import config
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.manager import NeutronManager
from neutron.openstack.common import policy as common_policy
from neutron.openstack.common import uuidutils
from neutron import policy
from neutron import quota
from neutron.tests import base
from neutron.tests import fake_notifier
from neutron.tests.unit import testlib_api
# Test-tree layout: <root>/etc holds test configuration files and
# <root>/unit/extensions holds test-only API extensions.
ROOTDIR = os.path.dirname(os.path.dirname(__file__))
ETCDIR = os.path.join(ROOTDIR, 'etc')
EXTDIR = os.path.join(ROOTDIR, 'unit/extensions')

# Shorthand for generating fresh UUIDs in test fixtures.
_uuid = uuidutils.generate_uuid
def etcdir(*p):
    """Return the path of a file inside the test etc/ directory."""
    return os.path.join(*((ETCDIR,) + p))
def _get_path(resource, id=None, action=None, fmt=None):
path = '/%s' % resource
if id is not None:
path = path + '/%s' % id
if action is not None:
path = path + '/%s' % action
if fmt is not None:
path = path + '.%s' % fmt
return path
class ResourceIndexTestCase(base.BaseTestCase):
    """Tests for the API index view listing the available resources."""

    def test_index_json(self):
        """The index view exposes each resource with its collection and
        a self link."""
        index = webtest.TestApp(router.Index({'foo': 'bar'}))
        res = index.get('')

        self.assertIn('resources', res.json)
        self.assertEqual(len(res.json['resources']), 1)

        resource = res.json['resources'][0]
        self.assertIn('collection', resource)
        self.assertEqual(resource['collection'], 'bar')

        self.assertIn('name', resource)
        self.assertEqual(resource['name'], 'foo')

        self.assertIn('links', resource)
        self.assertEqual(len(resource['links']), 1)

        link = resource['links'][0]
        self.assertIn('href', link)
        self.assertEqual(link['href'], 'http://localhost/bar')

        self.assertIn('rel', link)
        self.assertEqual(link['rel'], 'self')
class APIv2TestBase(base.BaseTestCase):
    """Common fixture: wires a mocked core plugin into a v2 APIRouter."""

    def setUp(self):
        super(APIv2TestBase, self).setUp()

        plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2'
        # Ensure existing ExtensionManager is not used
        PluginAwareExtensionManager._instance = None
        # Create the default configurations
        args = ['--config-file', etcdir('neutron.conf.test')]
        config.parse(args=args)
        # Update the plugin
        self.setup_coreplugin(plugin)
        cfg.CONF.set_override('allow_pagination', True)
        cfg.CONF.set_override('allow_sorting', True)
        self._plugin_patcher = mock.patch(plugin, autospec=True)
        self.plugin = self._plugin_patcher.start()
        instance = self.plugin.return_value
        # Advertise native pagination/sorting support on the mocked plugin
        # via NeutronPluginBaseV2's name-mangled attributes.
        instance._NeutronPluginBaseV2__native_pagination_support = True
        instance._NeutronPluginBaseV2__native_sorting_support = True

        self.addCleanup(self._plugin_patcher.stop)
        self.addCleanup(cfg.CONF.reset)

        api = router.APIRouter()
        self.api = webtest.TestApp(api)

        # Reset the quota driver so the ConfDriver override takes effect.
        quota.QUOTAS._driver = None
        cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
                              group='QUOTAS')
class _ArgMatcher(object):
"""An adapter to assist mock assertions, used to custom compare."""
def __init__(self, cmp, obj):
self.cmp = cmp
self.obj = obj
def __eq__(self, other):
return self.cmp(self.obj, other)
def _list_cmp(l1, l2):
return set(l1) == set(l2)
class APIv2TestCase(APIv2TestBase):
def _do_field_list(self, resource, base_fields):
    """Return base_fields plus the attributes policy forces into queries.

    The v2 API always fetches primary-key and required-by-policy
    attributes in addition to whatever fields the caller asked for, so
    plugin-call assertions must expect them too.

    Fix: copy base_fields instead of extending it in place, so the
    caller's list is no longer mutated as a side effect.
    """
    attr_info = attributes.RESOURCE_ATTRIBUTE_MAP[resource]
    policy_attrs = [name for (name, info) in attr_info.items()
                    if info.get('required_by_policy') or
                    info.get('primary_key')]
    fields = list(base_fields)
    fields.extend(policy_attrs)
    return fields
def _get_collection_kwargs(self, skipargs=[], **kwargs):
args_list = ['filters', 'fields', 'sorts', 'limit', 'marker',
'page_reverse']
args_dict = dict((arg, mock.ANY)
for arg in set(args_list) - set(skipargs))
args_dict.update(kwargs)
return args_dict
def test_fields(self):
    """A single 'fields' query parameter is forwarded to the plugin,
    augmented with the policy-required attributes."""
    instance = self.plugin.return_value
    instance.get_networks.return_value = []

    self.api.get(_get_path('networks'), {'fields': 'foo'})
    fields = self._do_field_list('networks', ['foo'])
    kwargs = self._get_collection_kwargs(fields=fields)
    instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_multiple(self):
    """Repeated 'fields' query parameters are all forwarded."""
    instance = self.plugin.return_value
    instance.get_networks.return_value = []
    fields = self._do_field_list('networks', ['foo', 'bar'])

    self.api.get(_get_path('networks'), {'fields': ['foo', 'bar']})
    kwargs = self._get_collection_kwargs(fields=fields)
    instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_multiple_with_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
fields = self._do_field_list('networks', ['foo'])
self.api.get(_get_path('networks'), {'fields': ['foo', '']})
kwargs = self._get_collection_kwargs(fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'fields': ''})
kwargs = self._get_collection_kwargs(fields=[])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_multiple_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'fields': ['', '']})
kwargs = self._get_collection_kwargs(fields=[])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': 'bar'})
filters = {'name': ['bar']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ''})
filters = {}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ['', '']})
filters = {}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple_with_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ['bar', '']})
filters = {'name': ['bar']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple_values(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ['bar', 'bar2']})
filters = {'name': ['bar', 'bar2']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': 'bar',
'tenant_id': 'bar2'})
filters = {'name': ['bar'], 'tenant_id': ['bar2']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_with_fields(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': 'bar', 'fields': 'foo'})
filters = {'name': ['bar']}
fields = self._do_field_list('networks', ['foo'])
kwargs = self._get_collection_kwargs(filters=filters, fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_with_convert_to(self):
instance = self.plugin.return_value
instance.get_ports.return_value = []
self.api.get(_get_path('ports'), {'admin_state_up': 'true'})
filters = {'admin_state_up': [True]}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_ports.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_with_convert_list_to(self):
instance = self.plugin.return_value
instance.get_ports.return_value = []
self.api.get(_get_path('ports'),
{'fixed_ips': ['ip_address=foo', 'subnet_id=bar']})
filters = {'fixed_ips': {'ip_address': ['foo'], 'subnet_id': ['bar']}}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_ports.assert_called_once_with(mock.ANY, **kwargs)
def test_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'limit': '10'})
kwargs = self._get_collection_kwargs(limit=10)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_great_than_max_limit(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'limit': '1001'})
kwargs = self._get_collection_kwargs(limit=1000)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_zero(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'limit': '0'})
kwargs = self._get_collection_kwargs(limit=1000)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_unspecific(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=1000)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_negative_value(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'), {'limit': -1},
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_limit_with_non_integer(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'),
{'limit': 'abc'}, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_limit_with_infinite_pagination_max_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
cfg.CONF.set_override('pagination_max_limit', 'Infinite')
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=None)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_negative_pagination_max_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
cfg.CONF.set_default('pagination_max_limit', '-1')
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=None)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_non_integer_pagination_max_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
cfg.CONF.set_default('pagination_max_limit', 'abc')
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=None)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_marker(self):
cfg.CONF.set_override('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
marker = _uuid()
self.api.get(_get_path('networks'),
{'marker': marker})
kwargs = self._get_collection_kwargs(limit=1000, marker=marker)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_page_reverse(self):
calls = []
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'page_reverse': 'True'})
kwargs = self._get_collection_kwargs(page_reverse=True)
calls.append(mock.call.get_networks(mock.ANY, **kwargs))
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'page_reverse': 'False'})
kwargs = self._get_collection_kwargs(page_reverse=False)
calls.append(mock.call.get_networks(mock.ANY, **kwargs))
def test_page_reverse_with_non_bool(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'page_reverse': 'abc'})
kwargs = self._get_collection_kwargs(page_reverse=False)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_page_reverse_with_unspecific(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(page_reverse=False)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_sort(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'sort_key': ['name', 'admin_state_up'],
'sort_dir': ['desc', 'asc']})
kwargs = self._get_collection_kwargs(sorts=[('name', False),
('admin_state_up', True),
('id', True)])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_sort_with_primary_key(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'sort_key': ['name', 'admin_state_up', 'id'],
'sort_dir': ['desc', 'asc', 'desc']})
kwargs = self._get_collection_kwargs(sorts=[('name', False),
('admin_state_up', True),
('id', False)])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_sort_without_direction(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'), {'sort_key': ['name']},
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_sort_with_invalid_attribute(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'),
{'sort_key': 'abc',
'sort_dir': 'asc'},
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_sort_with_invalid_dirs(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'),
{'sort_key': 'name',
'sort_dir': 'abc'},
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_emulated_sort(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_pagination_support = False
instance._NeutronPluginBaseV2__native_sorting_support = False
instance.get_networks.return_value = []
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks'), {'sort_key': ['name', 'status'],
'sort_dir': ['desc', 'asc']})
kwargs = self._get_collection_kwargs(
skipargs=['sorts', 'limit', 'marker', 'page_reverse'])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_emulated_sort_without_sort_field(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_pagination_support = False
instance._NeutronPluginBaseV2__native_sorting_support = False
instance.get_networks.return_value = []
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks'), {'sort_key': ['name', 'status'],
'sort_dir': ['desc', 'asc'],
'fields': ['subnets']})
kwargs = self._get_collection_kwargs(
skipargs=['sorts', 'limit', 'marker', 'page_reverse'],
fields=_ArgMatcher(_list_cmp, ['name',
'status',
'id',
'subnets',
'shared',
'tenant_id']))
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_emulated_pagination(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_pagination_support = False
instance.get_networks.return_value = []
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks'), {'limit': 10,
'marker': 'foo',
'page_reverse': False})
kwargs = self._get_collection_kwargs(skipargs=['limit',
'marker',
'page_reverse'])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_native_pagination_without_native_sorting(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_sorting_support = False
self.assertRaises(n_exc.Invalid, router.APIRouter)
def test_native_pagination_without_allow_sorting(self):
cfg.CONF.set_override('allow_sorting', False)
instance = self.plugin.return_value
instance.get_networks.return_value = []
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks'),
{'sort_key': ['name', 'admin_state_up'],
'sort_dir': ['desc', 'asc']})
kwargs = self._get_collection_kwargs(sorts=[('name', False),
('admin_state_up', True),
('id', True)])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
# Note: since all resources use the same controller and validation
# logic, we actually get really good coverage from testing just networks.
class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
    """End-to-end v2 API tests (list/show/create/update/delete) over the
    serialization format provided by WebTestCase (JSON here; subclasses
    may override ``fmt``), against a mocked core plugin.
    """

    def setUp(self):
        super(JSONV2TestCase, self).setUp()

    def _test_list(self, req_tenant_id, real_tenant_id):
        """List networks as *req_tenant_id* when the stored network
        belongs to *real_tenant_id*; check tenant-based filtering.
        """
        env = {}
        if req_tenant_id:
            env = {'neutron.context': context.Context('', req_tenant_id)}
        input_dict = {'id': uuidutils.generate_uuid(),
                      'name': 'net1',
                      'admin_state_up': True,
                      'status': "ACTIVE",
                      'tenant_id': real_tenant_id,
                      'shared': False,
                      'subnets': []}
        return_value = [input_dict]
        instance = self.plugin.return_value
        instance.get_networks.return_value = return_value

        res = self.api.get(_get_path('networks',
                                     fmt=self.fmt), extra_environ=env)
        res = self.deserialize(res)
        self.assertIn('networks', res)
        if not req_tenant_id or req_tenant_id == real_tenant_id:
            # expect full list returned
            self.assertEqual(len(res['networks']), 1)
            output_dict = res['networks'][0]
            input_dict['shared'] = False
            self.assertEqual(len(input_dict), len(output_dict))
            for k, v in input_dict.iteritems():
                self.assertEqual(v, output_dict[k])
        else:
            # expect no results
            self.assertEqual(len(res['networks']), 0)

    def test_list_noauth(self):
        self._test_list(None, _uuid())

    def test_list_keystone(self):
        tenant_id = _uuid()
        self._test_list(tenant_id, tenant_id)

    def test_list_keystone_bad(self):
        # Mismatched tenant should see an empty list.
        tenant_id = _uuid()
        self._test_list(tenant_id + "bad", tenant_id)

    def test_list_pagination(self):
        """A middle page must carry both 'next' and 'previous' links
        whose query strings reuse the request params with new markers.
        """
        id1 = str(_uuid())
        id2 = str(_uuid())
        input_dict1 = {'id': id1,
                       'name': 'net1',
                       'admin_state_up': True,
                       'status': "ACTIVE",
                       'tenant_id': '',
                       'shared': False,
                       'subnets': []}
        input_dict2 = {'id': id2,
                       'name': 'net2',
                       'admin_state_up': True,
                       'status': "ACTIVE",
                       'tenant_id': '',
                       'shared': False,
                       'subnets': []}
        return_value = [input_dict1, input_dict2]
        instance = self.plugin.return_value
        instance.get_networks.return_value = return_value
        params = {'limit': ['2'],
                  'marker': [str(_uuid())],
                  'sort_key': ['name'],
                  'sort_dir': ['asc']}
        res = self.api.get(_get_path('networks'),
                           params=params).json

        self.assertEqual(len(res['networks']), 2)
        self.assertEqual(sorted([id1, id2]),
                         sorted([res['networks'][0]['id'],
                                 res['networks'][1]['id']]))

        self.assertIn('networks_links', res)
        next_links = []
        previous_links = []
        for r in res['networks_links']:
            if r['rel'] == 'next':
                next_links.append(r)
            if r['rel'] == 'previous':
                previous_links.append(r)
        self.assertEqual(len(next_links), 1)
        self.assertEqual(len(previous_links), 1)

        # 'next' link: same params, marker is the last item on the page.
        url = urlparse.urlparse(next_links[0]['href'])
        self.assertEqual(url.path, _get_path('networks'))
        params['marker'] = [id2]
        self.assertEqual(urlparse.parse_qs(url.query), params)

        # 'previous' link: marker is the first item, page_reverse set.
        url = urlparse.urlparse(previous_links[0]['href'])
        self.assertEqual(url.path, _get_path('networks'))
        params['marker'] = [id1]
        params['page_reverse'] = ['True']
        self.assertEqual(urlparse.parse_qs(url.query), params)

    def test_list_pagination_with_last_page(self):
        # Fewer results than 'limit': no 'next' link, only 'previous'.
        id = str(_uuid())
        input_dict = {'id': id,
                      'name': 'net1',
                      'admin_state_up': True,
                      'status': "ACTIVE",
                      'tenant_id': '',
                      'shared': False,
                      'subnets': []}
        return_value = [input_dict]
        instance = self.plugin.return_value
        instance.get_networks.return_value = return_value
        params = {'limit': ['2'],
                  'marker': str(_uuid())}
        res = self.api.get(_get_path('networks'),
                           params=params).json

        self.assertEqual(len(res['networks']), 1)
        self.assertEqual(id, res['networks'][0]['id'])

        self.assertIn('networks_links', res)
        previous_links = []
        for r in res['networks_links']:
            self.assertNotEqual(r['rel'], 'next')
            if r['rel'] == 'previous':
                previous_links.append(r)
        self.assertEqual(len(previous_links), 1)

        url = urlparse.urlparse(previous_links[0]['href'])
        self.assertEqual(url.path, _get_path('networks'))
        expect_params = params.copy()
        expect_params['marker'] = [id]
        expect_params['page_reverse'] = ['True']
        self.assertEqual(urlparse.parse_qs(url.query), expect_params)

    def test_list_pagination_with_empty_page(self):
        # Empty result page: 'previous' link must drop the marker.
        return_value = []
        instance = self.plugin.return_value
        instance.get_networks.return_value = return_value
        params = {'limit': ['2'],
                  'marker': str(_uuid())}
        res = self.api.get(_get_path('networks'),
                           params=params).json

        self.assertEqual(res['networks'], [])

        previous_links = []
        if 'networks_links' in res:
            for r in res['networks_links']:
                self.assertNotEqual(r['rel'], 'next')
                if r['rel'] == 'previous':
                    previous_links.append(r)
        self.assertEqual(len(previous_links), 1)

        url = urlparse.urlparse(previous_links[0]['href'])
        self.assertEqual(url.path, _get_path('networks'))
        expect_params = params.copy()
        del expect_params['marker']
        expect_params['page_reverse'] = ['True']
        self.assertEqual(urlparse.parse_qs(url.query), expect_params)

    def test_list_pagination_reverse_with_last_page(self):
        # Reverse paging on the last page: only a 'next' link remains.
        id = str(_uuid())
        input_dict = {'id': id,
                      'name': 'net1',
                      'admin_state_up': True,
                      'status': "ACTIVE",
                      'tenant_id': '',
                      'shared': False,
                      'subnets': []}
        return_value = [input_dict]
        instance = self.plugin.return_value
        instance.get_networks.return_value = return_value
        params = {'limit': ['2'],
                  'marker': [str(_uuid())],
                  'page_reverse': ['True']}
        res = self.api.get(_get_path('networks'),
                           params=params).json

        self.assertEqual(len(res['networks']), 1)
        self.assertEqual(id, res['networks'][0]['id'])

        self.assertIn('networks_links', res)
        next_links = []
        for r in res['networks_links']:
            self.assertNotEqual(r['rel'], 'previous')
            if r['rel'] == 'next':
                next_links.append(r)
        self.assertEqual(len(next_links), 1)

        url = urlparse.urlparse(next_links[0]['href'])
        self.assertEqual(url.path, _get_path('networks'))
        expected_params = params.copy()
        del expected_params['page_reverse']
        expected_params['marker'] = [id]
        self.assertEqual(urlparse.parse_qs(url.query),
                         expected_params)

    def test_list_pagination_reverse_with_empty_page(self):
        return_value = []
        instance = self.plugin.return_value
        instance.get_networks.return_value = return_value
        params = {'limit': ['2'],
                  'marker': [str(_uuid())],
                  'page_reverse': ['True']}
        res = self.api.get(_get_path('networks'),
                           params=params).json
        self.assertEqual(res['networks'], [])

        next_links = []
        if 'networks_links' in res:
            for r in res['networks_links']:
                self.assertNotEqual(r['rel'], 'previous')
                if r['rel'] == 'next':
                    next_links.append(r)
        self.assertEqual(len(next_links), 1)

        url = urlparse.urlparse(next_links[0]['href'])
        self.assertEqual(url.path, _get_path('networks'))
        expect_params = params.copy()
        del expect_params['marker']
        del expect_params['page_reverse']
        self.assertEqual(urlparse.parse_qs(url.query), expect_params)

    def test_create(self):
        net_id = _uuid()
        data = {'network': {'name': 'net1', 'admin_state_up': True,
                            'tenant_id': _uuid()}}
        return_value = {'subnets': [], 'status': "ACTIVE",
                        'id': net_id}
        return_value.update(data['network'].copy())

        instance = self.plugin.return_value
        instance.create_network.return_value = return_value
        instance.get_networks_count.return_value = 0

        res = self.api.post(_get_path('networks', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/' + self.fmt)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        res = self.deserialize(res)
        self.assertIn('network', res)
        net = res['network']
        self.assertEqual(net['id'], net_id)
        self.assertEqual(net['status'], "ACTIVE")

    def test_create_use_defaults(self):
        # Omitted attributes must be filled in with their declared
        # defaults before the plugin is called.
        net_id = _uuid()
        initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
        full_input = {'network': {'admin_state_up': True,
                                  'shared': False}}
        full_input['network'].update(initial_input['network'])

        return_value = {'id': net_id, 'status': "ACTIVE"}
        return_value.update(full_input['network'])

        instance = self.plugin.return_value
        instance.create_network.return_value = return_value
        instance.get_networks_count.return_value = 0

        res = self.api.post(_get_path('networks', fmt=self.fmt),
                            self.serialize(initial_input),
                            content_type='application/' + self.fmt)
        instance.create_network.assert_called_with(mock.ANY,
                                                   network=full_input)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        res = self.deserialize(res)
        self.assertIn('network', res)
        net = res['network']
        self.assertEqual(net['id'], net_id)
        self.assertEqual(net['admin_state_up'], True)
        self.assertEqual(net['status'], "ACTIVE")

    def test_create_no_keystone_env(self):
        data = {'name': 'net1'}
        res = self.api.post(_get_path('networks', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/' + self.fmt,
                            expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)

    def test_create_with_keystone_env(self):
        tenant_id = _uuid()
        net_id = _uuid()
        env = {'neutron.context': context.Context('', tenant_id)}
        # tenant_id should be fetched from env
        initial_input = {'network': {'name': 'net1'}}
        full_input = {'network': {'admin_state_up': True,
                      'shared': False, 'tenant_id': tenant_id}}
        full_input['network'].update(initial_input['network'])

        return_value = {'id': net_id, 'status': "ACTIVE"}
        return_value.update(full_input['network'])

        instance = self.plugin.return_value
        instance.create_network.return_value = return_value
        instance.get_networks_count.return_value = 0

        res = self.api.post(_get_path('networks', fmt=self.fmt),
                            self.serialize(initial_input),
                            content_type='application/' + self.fmt,
                            extra_environ=env)
        instance.create_network.assert_called_with(mock.ANY,
                                                   network=full_input)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)

    def test_create_bad_keystone_tenant(self):
        tenant_id = _uuid()
        data = {'network': {'name': 'net1', 'tenant_id': tenant_id}}
        env = {'neutron.context': context.Context('', tenant_id + "bad")}
        res = self.api.post(_get_path('networks', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/' + self.fmt,
                            expect_errors=True,
                            extra_environ=env)
        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)

    def test_create_no_body(self):
        data = {'whoa': None}
        res = self.api.post(_get_path('networks', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/' + self.fmt,
                            expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)

    def test_create_no_resource(self):
        data = {}
        res = self.api.post(_get_path('networks', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/' + self.fmt,
                            expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)

    def test_create_missing_attr(self):
        # 'network_id' is required for ports; omitting it is a 400.
        data = {'port': {'what': 'who', 'tenant_id': _uuid()}}
        res = self.api.post(_get_path('ports', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/' + self.fmt,
                            expect_errors=True)
        self.assertEqual(res.status_int, 400)

    def test_create_readonly_attr(self):
        # 'status' is not POSTable; supplying it is a 400.
        data = {'network': {'name': 'net1', 'tenant_id': _uuid(),
                            'status': "ACTIVE"}}
        res = self.api.post(_get_path('networks', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/' + self.fmt,
                            expect_errors=True)
        self.assertEqual(res.status_int, 400)

    def test_create_bulk(self):
        data = {'networks': [{'name': 'net1',
                              'admin_state_up': True,
                              'tenant_id': _uuid()},
                             {'name': 'net2',
                              'admin_state_up': True,
                              'tenant_id': _uuid()}]}

        def side_effect(context, network):
            # Echo each request body back with an empty subnet list.
            net = network.copy()
            net['network'].update({'subnets': []})
            return net['network']

        instance = self.plugin.return_value
        instance.create_network.side_effect = side_effect
        instance.get_networks_count.return_value = 0
        res = self.api.post(_get_path('networks', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/' + self.fmt)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)

    def test_create_bulk_no_networks(self):
        data = {'networks': []}
        res = self.api.post(_get_path('networks', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/' + self.fmt,
                            expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)

    def test_create_bulk_missing_attr(self):
        data = {'ports': [{'what': 'who', 'tenant_id': _uuid()}]}
        res = self.api.post(_get_path('ports', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/' + self.fmt,
                            expect_errors=True)
        self.assertEqual(res.status_int, 400)

    def test_create_bulk_partial_body(self):
        data = {'ports': [{'device_id': 'device_1',
                           'tenant_id': _uuid()},
                          {'tenant_id': _uuid()}]}
        res = self.api.post(_get_path('ports', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/' + self.fmt,
                            expect_errors=True)
        self.assertEqual(res.status_int, 400)

    def test_create_attr_not_specified(self):
        # Attributes not in the request with no plain default get the
        # ATTR_NOT_SPECIFIED sentinel before reaching the plugin.
        net_id = _uuid()
        tenant_id = _uuid()
        device_id = _uuid()
        initial_input = {'port': {'name': '', 'network_id': net_id,
                                  'tenant_id': tenant_id,
                                  'device_id': device_id,
                                  'admin_state_up': True}}
        full_input = {'port': {'admin_state_up': True,
                               'mac_address': attributes.ATTR_NOT_SPECIFIED,
                               'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
                               'device_owner': ''}}
        full_input['port'].update(initial_input['port'])
        return_value = {'id': _uuid(), 'status': 'ACTIVE',
                        'admin_state_up': True,
                        'mac_address': 'ca:fe:de:ad:be:ef',
                        'device_id': device_id,
                        'device_owner': ''}
        return_value.update(initial_input['port'])

        instance = self.plugin.return_value
        instance.get_network.return_value = {'tenant_id': unicode(tenant_id)}
        instance.get_ports_count.return_value = 1
        instance.create_port.return_value = return_value
        res = self.api.post(_get_path('ports', fmt=self.fmt),
                            self.serialize(initial_input),
                            content_type='application/' + self.fmt)
        instance.create_port.assert_called_with(mock.ANY, port=full_input)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        res = self.deserialize(res)
        self.assertIn('port', res)
        port = res['port']
        self.assertEqual(port['network_id'], net_id)
        self.assertEqual(port['mac_address'], 'ca:fe:de:ad:be:ef')

    def test_create_return_extra_attr(self):
        # Attributes the plugin returns that are not declared visible
        # (here 'v2attrs:something') must be stripped from the response.
        net_id = _uuid()
        data = {'network': {'name': 'net1', 'admin_state_up': True,
                            'tenant_id': _uuid()}}
        return_value = {'subnets': [], 'status': "ACTIVE",
                        'id': net_id, 'v2attrs:something': "123"}
        return_value.update(data['network'].copy())

        instance = self.plugin.return_value
        instance.create_network.return_value = return_value
        instance.get_networks_count.return_value = 0

        res = self.api.post(_get_path('networks', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/' + self.fmt)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        res = self.deserialize(res)
        self.assertIn('network', res)
        net = res['network']
        self.assertEqual(net['id'], net_id)
        self.assertEqual(net['status'], "ACTIVE")
        self.assertNotIn('v2attrs:something', net)

    def test_fields(self):
        return_value = {'name': 'net1', 'admin_state_up': True,
                        'subnets': []}

        instance = self.plugin.return_value
        instance.get_network.return_value = return_value

        self.api.get(_get_path('networks',
                               id=uuidutils.generate_uuid(),
                               fmt=self.fmt))

    def _test_delete(self, req_tenant_id, real_tenant_id, expected_code,
                     expect_errors=False):
        """DELETE a network as *req_tenant_id* and check the status code
        against tenant-based authorization.
        """
        env = {}
        if req_tenant_id:
            env = {'neutron.context': context.Context('', req_tenant_id)}
        instance = self.plugin.return_value
        instance.get_network.return_value = {'tenant_id': real_tenant_id,
                                             'shared': False}
        instance.delete_network.return_value = None

        res = self.api.delete(_get_path('networks',
                                        id=uuidutils.generate_uuid(),
                                        fmt=self.fmt),
                              extra_environ=env,
                              expect_errors=expect_errors)
        self.assertEqual(res.status_int, expected_code)

    def test_delete_noauth(self):
        self._test_delete(None, _uuid(), exc.HTTPNoContent.code)

    def test_delete_keystone(self):
        tenant_id = _uuid()
        self._test_delete(tenant_id, tenant_id, exc.HTTPNoContent.code)

    def test_delete_keystone_bad_tenant(self):
        tenant_id = _uuid()
        self._test_delete(tenant_id + "bad", tenant_id,
                          exc.HTTPNotFound.code, expect_errors=True)

    def _test_get(self, req_tenant_id, real_tenant_id, expected_code,
                  expect_errors=False):
        """GET a network as *req_tenant_id*; tenant ids ending in
        'another' simulate an admin seeing a shared network.
        """
        env = {}
        shared = False
        if req_tenant_id:
            env = {'neutron.context': context.Context('', req_tenant_id)}
            if req_tenant_id.endswith('another'):
                shared = True
                env['neutron.context'].roles = ['tenant_admin']

        data = {'tenant_id': real_tenant_id, 'shared': shared}
        instance = self.plugin.return_value
        instance.get_network.return_value = data

        res = self.api.get(_get_path('networks',
                                     id=uuidutils.generate_uuid(),
                                     fmt=self.fmt),
                           extra_environ=env,
                           expect_errors=expect_errors)
        self.assertEqual(res.status_int, expected_code)
        return res

    def test_get_noauth(self):
        self._test_get(None, _uuid(), 200)

    def test_get_keystone(self):
        tenant_id = _uuid()
        self._test_get(tenant_id, tenant_id, 200)

    def test_get_keystone_bad_tenant(self):
        tenant_id = _uuid()
        self._test_get(tenant_id + "bad", tenant_id,
                       exc.HTTPNotFound.code, expect_errors=True)

    def test_get_keystone_shared_network(self):
        tenant_id = _uuid()
        self._test_get(tenant_id + "another", tenant_id, 200)

    def test_get_keystone_strip_admin_only_attribute(self):
        tenant_id = _uuid()
        # Inject rule in policy engine
        policy.init()
        common_policy._rules['get_network:name'] = common_policy.parse_rule(
            "rule:admin_only")
        res = self._test_get(tenant_id, tenant_id, 200)
        res = self.deserialize(res)
        try:
            self.assertNotIn('name', res['network'])
        finally:
            # Always undo the injected rule so other tests are unaffected.
            del common_policy._rules['get_network:name']

    def _test_update(self, req_tenant_id, real_tenant_id, expected_code,
                     expect_errors=False):
        """PUT an update as *req_tenant_id* and check authorization plus
        the GET performed as part of the update flow.
        """
        env = {}
        if req_tenant_id:
            env = {'neutron.context': context.Context('', req_tenant_id)}
        # leave out 'name' field intentionally
        data = {'network': {'admin_state_up': True}}
        return_value = {'subnets': []}
        return_value.update(data['network'].copy())

        instance = self.plugin.return_value
        instance.get_network.return_value = {'tenant_id': real_tenant_id,
                                             'shared': False}
        instance.update_network.return_value = return_value

        res = self.api.put(_get_path('networks',
                                     id=uuidutils.generate_uuid(),
                                     fmt=self.fmt),
                           self.serialize(data),
                           extra_environ=env,
                           expect_errors=expect_errors)
        # Ensure id attribute is included in fields returned by GET call
        # in update procedure.
        self.assertEqual(1, instance.get_network.call_count)
        self.assertIn('id', instance.get_network.call_args[1]['fields'])
        self.assertEqual(res.status_int, expected_code)

    def test_update_noauth(self):
        self._test_update(None, _uuid(), 200)

    def test_update_keystone(self):
        tenant_id = _uuid()
        self._test_update(tenant_id, tenant_id, 200)

    def test_update_keystone_bad_tenant(self):
        tenant_id = _uuid()
        self._test_update(tenant_id + "bad", tenant_id,
                          exc.HTTPNotFound.code, expect_errors=True)

    def test_update_readonly_field(self):
        data = {'network': {'status': "NANANA"}}
        res = self.api.put(_get_path('networks', id=_uuid()),
                           self.serialize(data),
                           content_type='application/' + self.fmt,
                           expect_errors=True)
        self.assertEqual(res.status_int, 400)

    def test_invalid_attribute_field(self):
        data = {'network': {'invalid_key1': "foo1", 'invalid_key2': "foo2"}}
        res = self.api.put(_get_path('networks', id=_uuid()),
                           self.serialize(data),
                           content_type='application/' + self.fmt,
                           expect_errors=True)
        self.assertEqual(res.status_int, 400)
class SubresourceTest(base.BaseTestCase):
    """Tests for sub-resource routing (e.g. /networks/<id>/dummies).

    Registers a temporary 'dummy' sub-resource of networks in the
    router and attribute map, and checks that CRUD requests are
    dispatched to the plugin's *_network_dummy methods with the parent
    network_id passed through.
    """

    def setUp(self):
        super(SubresourceTest, self).setUp()

        plugin = 'neutron.tests.unit.test_api_v2.TestSubresourcePlugin'
        PluginAwareExtensionManager._instance = None

        # Save the global RESOURCE_ATTRIBUTE_MAP
        self.saved_attr_map = {}
        for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
            self.saved_attr_map[resource] = attrs.copy()

        args = ['--config-file', etcdir('neutron.conf.test')]
        config.parse(args=args)
        self.setup_coreplugin(plugin)

        self._plugin_patcher = mock.patch(plugin, autospec=True)
        self.plugin = self._plugin_patcher.start()

        self.addCleanup(self._plugin_patcher.stop)
        self.addCleanup(cfg.CONF.reset)

        # Register the temporary sub-resource before building the router.
        router.SUB_RESOURCES['dummy'] = {
            'collection_name': 'dummies',
            'parent': {'collection_name': 'networks',
                       'member_name': 'network'}
        }
        attributes.RESOURCE_ATTRIBUTE_MAP['dummies'] = {
            'foo': {'allow_post': True, 'allow_put': True,
                    'validate': {'type:string': None},
                    'default': '', 'is_visible': True},
            'tenant_id': {'allow_post': True, 'allow_put': False,
                          'validate': {'type:string': None},
                          'required_by_policy': True,
                          'is_visible': True}
        }
        api = router.APIRouter()
        self.api = webtest.TestApp(api)

    def tearDown(self):
        # Undo the global registrations made in setUp.
        router.SUB_RESOURCES = {}
        # Restore the global RESOURCE_ATTRIBUTE_MAP
        attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
        super(SubresourceTest, self).tearDown()

    def test_index_sub_resource(self):
        instance = self.plugin.return_value

        self.api.get('/networks/id1/dummies')
        instance.get_network_dummies.assert_called_once_with(mock.ANY,
                                                             filters=mock.ANY,
                                                             fields=mock.ANY,
                                                             network_id='id1')

    def test_show_sub_resource(self):
        instance = self.plugin.return_value

        dummy_id = _uuid()
        self.api.get('/networks/id1' + _get_path('dummies', id=dummy_id))
        instance.get_network_dummy.assert_called_once_with(mock.ANY,
                                                           dummy_id,
                                                           network_id='id1',
                                                           fields=mock.ANY)

    def test_create_sub_resource(self):
        instance = self.plugin.return_value

        body = {'dummy': {'foo': 'bar', 'tenant_id': _uuid()}}
        self.api.post_json('/networks/id1/dummies', body)
        instance.create_network_dummy.assert_called_once_with(mock.ANY,
                                                              network_id='id1',
                                                              dummy=body)

    def test_update_sub_resource(self):
        instance = self.plugin.return_value

        dummy_id = _uuid()
        body = {'dummy': {'foo': 'bar'}}
        self.api.put_json('/networks/id1' + _get_path('dummies', id=dummy_id),
                          body)
        instance.update_network_dummy.assert_called_once_with(mock.ANY,
                                                              dummy_id,
                                                              network_id='id1',
                                                              dummy=body)

    def test_delete_sub_resource(self):
        instance = self.plugin.return_value

        dummy_id = _uuid()
        self.api.delete('/networks/id1' + _get_path('dummies', id=dummy_id))
        instance.delete_network_dummy.assert_called_once_with(mock.ANY,
                                                              dummy_id,
                                                              network_id='id1')
# Note: since all resources use the same controller and validation
# logic, we actually get really good coverage from testing just networks.
class XMLV2TestCase(JSONV2TestCase):
    """Re-runs every JSONV2TestCase test with XML (de)serialization."""
    fmt = 'xml'
class V2Views(base.BaseTestCase):
    """Verifies the v2 controller view exposes only declared attributes."""

    def _view(self, keys, collection, resource):
        """Render fake data through the controller and check key filtering."""
        # Payload holds every expected key plus one undeclared key.
        data = dict.fromkeys(keys, 'value')
        data['fake'] = 'value'
        attr_info = attributes.RESOURCE_ATTRIBUTE_MAP[collection]
        controller = v2_base.Controller(None, collection, resource, attr_info)
        res = controller._view(context.get_admin_context(), data)
        # The undeclared key must be stripped; declared keys must survive.
        self.assertNotIn('fake', res)
        for key in keys:
            self.assertIn(key, res)

    def test_network(self):
        self._view(('id', 'name', 'subnets', 'admin_state_up', 'status',
                    'tenant_id'),
                   'networks', 'network')

    def test_port(self):
        self._view(('id', 'network_id', 'mac_address', 'fixed_ips',
                    'device_id', 'admin_state_up', 'tenant_id', 'status'),
                   'ports', 'port')

    def test_subnet(self):
        self._view(('id', 'network_id', 'tenant_id', 'gateway_ip',
                    'ip_version', 'cidr', 'enable_dhcp'),
                   'subnets', 'subnet')
class NotificationTest(APIv2TestBase):
    """Verifies CRUD operations emit paired start/end notifications."""
    def setUp(self):
        super(NotificationTest, self).setUp()
        # Discard notifications captured by earlier tests.
        fake_notifier.reset()
    def _resource_op_notifier(self, opname, resource, expected_errors=False,
                              notification_level='INFO'):
        """Run one CRUD op, then assert the <resource>.<op>.start/end
        notification pair, its priority, and the HTTP status code."""
        initial_input = {resource: {'name': 'myname'}}
        instance = self.plugin.return_value
        instance.get_networks.return_value = initial_input
        instance.get_networks_count.return_value = 0
        # Default expectation matches the 'create' branch (201).
        expected_code = exc.HTTPCreated.code
        if opname == 'create':
            initial_input[resource]['tenant_id'] = _uuid()
            res = self.api.post_json(
                _get_path('networks'),
                initial_input, expect_errors=expected_errors)
        if opname == 'update':
            res = self.api.put_json(
                _get_path('networks', id=_uuid()),
                initial_input, expect_errors=expected_errors)
            expected_code = exc.HTTPOk.code
        if opname == 'delete':
            # NOTE(review): tenant_id is set although DELETE sends no body;
            # appears unused for this branch -- confirm.
            initial_input[resource]['tenant_id'] = _uuid()
            res = self.api.delete(
                _get_path('networks', id=_uuid()),
                expect_errors=expected_errors)
            expected_code = exc.HTTPNoContent.code
        # Exactly one .start and one .end event, in order.
        expected_events = ('.'.join([resource, opname, "start"]),
                           '.'.join([resource, opname, "end"]))
        self.assertEqual(len(fake_notifier.NOTIFICATIONS),
                         len(expected_events))
        for msg, event in zip(fake_notifier.NOTIFICATIONS, expected_events):
            self.assertEqual(notification_level, msg['priority'])
            self.assertEqual(event, msg['event_type'])
        self.assertEqual(res.status_int, expected_code)
    def test_network_create_notifer(self):
        self._resource_op_notifier('create', 'network')
    def test_network_delete_notifer(self):
        self._resource_op_notifier('delete', 'network')
    def test_network_update_notifer(self):
        self._resource_op_notifier('update', 'network')
    def test_network_create_notifer_with_log_level(self):
        # Notifications must honor the configured default level.
        cfg.CONF.set_override('default_notification_level', 'DEBUG')
        self._resource_op_notifier('create', 'network',
                                   notification_level='DEBUG')
class QuotaTest(APIv2TestBase):
    """Checks quota enforcement on network creation.

    Fix: the first two tests built a `full_input` dict that was never sent
    or asserted against; the dead locals have been removed.
    """

    def test_create_network_quota(self):
        """Creation fails when the count-based quota is already reached."""
        cfg.CONF.set_override('quota_network', 1, group='QUOTAS')
        initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
        instance = self.plugin.return_value
        # One network already exists, so a quota of 1 is exhausted.
        instance.get_networks_count.return_value = 1
        res = self.api.post_json(
            _get_path('networks'), initial_input, expect_errors=True)
        instance.get_networks_count.assert_called_with(mock.ANY,
                                                       filters=mock.ANY)
        self.assertIn("Quota exceeded for resources",
                      res.json['NeutronError']['message'])

    def test_create_network_quota_no_counts(self):
        """Quota falls back to listing resources when the plugin cannot
        count them."""
        cfg.CONF.set_override('quota_network', 1, group='QUOTAS')
        initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
        instance = self.plugin.return_value
        instance.get_networks_count.side_effect = (
            NotImplementedError())
        # The list already contains one network, exhausting the quota of 1.
        instance.get_networks.return_value = ["foo"]
        res = self.api.post_json(
            _get_path('networks'), initial_input, expect_errors=True)
        instance.get_networks_count.assert_called_with(mock.ANY,
                                                       filters=mock.ANY)
        self.assertIn("Quota exceeded for resources",
                      res.json['NeutronError']['message'])

    def test_create_network_quota_without_limit(self):
        """A quota of -1 means unlimited; creation succeeds regardless of
        the current count."""
        cfg.CONF.set_override('quota_network', -1, group='QUOTAS')
        initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
        instance = self.plugin.return_value
        instance.get_networks_count.return_value = 3
        res = self.api.post_json(
            _get_path('networks'), initial_input)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
class ExtensionTestCase(base.BaseTestCase):
    """Checks that attributes declared by an API extension round-trip
    through the v2 API while undeclared ones are filtered out."""
    def setUp(self):
        super(ExtensionTestCase, self).setUp()
        plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2'
        # Ensure existing ExtensionManager is not used
        PluginAwareExtensionManager._instance = None
        # Save the global RESOURCE_ATTRIBUTE_MAP
        self.saved_attr_map = {}
        for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
            self.saved_attr_map[resource] = attrs.copy()
        # Create the default configurations
        args = ['--config-file', etcdir('neutron.conf.test')]
        config.parse(args=args)
        # Update the plugin and extensions path
        self.setup_coreplugin(plugin)
        cfg.CONF.set_override('api_extensions_path', EXTDIR)
        self._plugin_patcher = mock.patch(plugin, autospec=True)
        self.plugin = self._plugin_patcher.start()
        # Instantiate mock plugin and enable the V2attributes extension
        NeutronManager.get_plugin().supported_extension_aliases = ["v2attrs"]
        api = router.APIRouter()
        self.api = webtest.TestApp(api)
        # Force the quota driver to be re-resolved with the test config.
        quota.QUOTAS._driver = None
        cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
                              group='QUOTAS')
    def tearDown(self):
        super(ExtensionTestCase, self).tearDown()
        self._plugin_patcher.stop()
        self.api = None
        self.plugin = None
        cfg.CONF.reset()
        # Restore the global RESOURCE_ATTRIBUTE_MAP
        attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
    def test_extended_create(self):
        net_id = _uuid()
        # Request includes the extension attribute v2attrs:something_else.
        initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid(),
                                     'v2attrs:something_else': "abc"}}
        # The plugin should receive the input merged with defaulted attrs.
        data = {'network': {'admin_state_up': True, 'shared': False}}
        data['network'].update(initial_input['network'])
        return_value = {'subnets': [], 'status': "ACTIVE",
                        'id': net_id,
                        'v2attrs:something': "123"}
        return_value.update(data['network'].copy())
        instance = self.plugin.return_value
        instance.create_network.return_value = return_value
        instance.get_networks_count.return_value = 0
        res = self.api.post_json(_get_path('networks'), initial_input)
        instance.create_network.assert_called_with(mock.ANY,
                                                   network=data)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        self.assertIn('network', res.json)
        net = res.json['network']
        self.assertEqual(net['id'], net_id)
        self.assertEqual(net['status'], "ACTIVE")
        # Declared extension attr is visible; undeclared one is stripped.
        self.assertEqual(net['v2attrs:something'], "123")
        self.assertNotIn('v2attrs:something_else', net)
class TestSubresourcePlugin():
    """Stub plugin exposing the 'dummy' sub-resource CRUD interface; every
    operation returns an empty result."""

    def get_network_dummies(self, context, network_id,
                            filters=None, fields=None):
        "list the dummies under a network"
        return []

    def get_network_dummy(self, context, id, network_id,
                          fields=None):
        "fetch one dummy by id"
        return {}

    def create_network_dummy(self, context, network_id, dummy):
        "create a dummy under a network"
        return {}

    def update_network_dummy(self, context, id, network_id, dummy):
        "update an existing dummy"
        return {}

    def delete_network_dummy(self, context, id, network_id):
        "delete a dummy; nothing to return"
        return
class ListArgsTestCase(base.BaseTestCase):
    """Exercises api_common.list_args extraction of repeated query params."""

    def test_list_args(self):
        # Only the repeated 'fields' values come back; other params ignored.
        request = webob.Request.blank('/?fields=4&foo=3&fields=2&bar=1')
        self.assertEqual(sorted(api_common.list_args(request, 'fields')),
                         ['2', '4'])

    def test_list_args_with_empty(self):
        # No 'fields' param at all yields an empty list.
        request = webob.Request.blank('/?foo=4&bar=3&baz=2&qux=1')
        self.assertEqual([], api_common.list_args(request, 'fields'))
class FiltersTestCase(base.BaseTestCase):
    """Exercises api_common.get_filters query-string parsing."""

    def test_all_skip_args(self):
        # Every parameter is in the skip list, so nothing is returned.
        request = webob.Request.blank('/?fields=4&fields=3&fields=2&fields=1')
        self.assertEqual({}, api_common.get_filters(request, None,
                                                    ["fields"]))

    def test_blank_values(self):
        # Parameters with empty values are dropped.
        request = webob.Request.blank('/?foo=&bar=&baz=&qux=')
        self.assertEqual({}, api_common.get_filters(request, {}))

    def test_no_attr_info(self):
        request = webob.Request.blank('/?foo=4&bar=3&baz=2&qux=1')
        expected = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
        self.assertEqual(api_common.get_filters(request, {}), expected)

    def test_attr_info_without_conversion(self):
        # attr_info without converters leaves values as string lists.
        request = webob.Request.blank('/?foo=4&bar=3&baz=2&qux=1')
        expected = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
        self.assertEqual(
            api_common.get_filters(request, {'foo': {'key': 'val'}}),
            expected)

    def test_attr_info_with_convert_list_to(self):
        # convert_list_to folds repeated key=value pairs into one dict.
        request = webob.Request.blank('/?foo=key=4&bar=3&foo=key=2&qux=1')
        attr_info = {
            'foo': {
                'convert_list_to': attributes.convert_kvp_list_to_dict,
            }
        }
        expected = {'foo': {'key': ['2', '4']}, 'bar': ['3'], 'qux': ['1']}
        self.assertEqual(api_common.get_filters(request, attr_info), expected)

    def test_attr_info_with_convert_to(self):
        # convert_to is applied per value: foo becomes an int.
        request = webob.Request.blank('/?foo=4&bar=3&baz=2&qux=1')
        attr_info = {'foo': {'convert_to': attributes.convert_to_int}}
        expected = {'foo': [4], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
        self.assertEqual(api_common.get_filters(request, attr_info), expected)
class CreateResourceTestCase(base.BaseTestCase):
    """Sanity check for the v2 resource factory."""

    def test_resource_creation(self):
        fake_resource = v2_base.create_resource('fakes', 'fake', None, {})
        # The factory must hand back a WSGI-wrapped controller.
        self.assertIsInstance(fake_resource, webob.dec.wsgify)
| |
# Copyright 2006-2012 Mark Diekhans
"""
Pairwise alignment. All coordinates are strand-specific
"""
import copy,re
from pycbio.hgdata.psl import PslReader
from pycbio.sys.fileOps import prLine,iterLines
# FIXME: need range/overlap operators
# Maps a strand symbol to its opposite.
otherStrand = {"+": "-", "-": "+"}
class Coord(object):
"""A coordinate, which can be either absolute or relative to start of strand.
"""
__slots__ = ("seqId", "start", "end", "size", "strand", "isAbs")
def __init__(self, seqId, start, end, size, strand, isAbs): #pylint: disable=R0913
assert((start <= end) and (start <= size) and (end <= size))
assert(strand in ("+", "-"))
self.seqId = seqId
self.start = start
self.end = end
self.size = size
self.strand = strand
self.isAbs = isAbs
def __getstate__(self):
return (self.seqId, self.start, self.end, self.size, self.strand, self.isAbs)
def __setstate__(self, st):
(self.seqId, self.start, self.end, self.size, self.strand, self.isAbs) = st
def __str__(self):
return self.seqId + ":" + str(self.start) + "-" + str(self.end) + "(" \
+ self.strand + ("a" if self.isAbs else "r") + ")"
def toAbs(self):
"""create a new coord object that is in absolute coordinates. Does not change the
strand"""
if self.isAbs or (self.strand == "+"):
return Coord(self.seqId, self.start, self.end, self.size, self.strand, True)
else:
return Coord(self.seqId, (self.size - self.end), (self.size - self.start), self.size, self.strand, True)
def toRel(self):
"""create a new coord object that is in relative coordinates. Does not change the
strand"""
if (not self.isAbs) or (self.strand == "+"):
return Coord(self.seqId, self.start, self.end, self.size, self.strand, False)
else:
return Coord(self.seqId, (self.size - self.end), (self.size - self.start), self.size, self.strand, False)
def toRev(self):
"""create a new coord object that is on the opposite strand, does not change
coordinate system"""
if self.isAbs:
return Coord(self.seqId, self.start, self.end, self.size, self.strand, True)
else:
return Coord(self.seqId, (self.size - self.end), (self.size - self.start), self.size, otherStrand[self.strand], False)
def getRange(self, start, end):
"generate a coord object the same sequence, only using start/end as the bounds"
return Coord(self.seqId, start, end, self.size, self.strand, self.isAbs)
def overlaps(self, start, end):
"determine if a range overlaps"
maxStart = max(self.start, start)
minEnd = min(self.end, end)
return (maxStart < minEnd)
def coordOver(self, coord):
"does another coordinate overlap this one"
if coord.seqId != self.seqId:
return False
elif (self.isAbs or (self.strand == '+')) and (coord.isAbs or (coord.strand == '+')):
# can compare directly
return self.overlaps(coord.start, coord.end)
elif (not self.isAbs) and (not coord.isAbs):
# both relative
return (self.strand == coord.strand) and self.overlaps(coord.start, coord.end)
elif self.isAbs:
assert(not coord.isAbs)
coord = coord.toAbs()
return (self.strand == coord.strand) and self.overlaps(coord.start, coord.end)
else:
assert(not self.isAbs)
aself = self.toAbs()
return (aself.strand == coord.strand) and aself.overlaps(coord.start, coord.end)
class Seq(Coord):
    """Sequence in an alignment, coordinates are relative."""
    __slots__ = ("cds")

    def __init__(self, seqId, start, end, size, strand, cds=None): #pylint: disable=R0913
        Coord.__init__(self, seqId, start, end, size, strand, True)
        self.cds = cds

    def __getstate__(self):
        return (Coord.__getstate__(self), self.cds)

    def __setstate__(self, st):
        assert(len(st) == 2)
        Coord.__setstate__(self, st[0])
        self.cds = st[1]

    def copy(self):
        "return a deep copy of this sequence"
        return copy.deepcopy(self)

    def mkSubSeq(self, start, end):
        "create a SubSeq covering start..end, clipping any CDS to that range"
        assert((start >= self.start) and (end <= self.end))
        sub = SubSeq(self, start, end)
        if self.cds is not None:
            cdsSt = max(self.cds.start, sub.start)
            cdsEn = min(self.cds.end, sub.end)
            if cdsSt < cdsEn:
                sub.cds = Cds(cdsSt, cdsEn)
        return sub

    def revCmpl(self):
        "build the reverse-complement counterpart of this sequence"
        revCds = None if self.cds is None else self.cds.revCmpl(self.size)
        revStrand = '+' if (self.strand == '-') else '-'
        return Seq(self.seqId, self.size - self.end, self.size - self.start,
                   self.size, revStrand, revCds)

    def __str__(self):
        return "%s:%s-%s/%s sz: %s cds: %s" % (self.seqId, self.start,
                                               self.end, self.strand,
                                               self.size, self.cds)

    def __len__(self):
        "number of bases covered by this sequence range"
        return self.end - self.start

    def updateCds(self, cdsStart, cdsEnd):
        """Grow the CDS to include the given range, trimmed to seq bounds."""
        cdsStart = max(self.start, cdsStart)
        cdsEnd = min(self.end, cdsEnd)
        if self.cds is None:
            self.cds = Cds(cdsStart, cdsEnd)
        else:
            self.cds.start = min(self.cds.start, cdsStart)
            self.cds.end = max(self.cds.end, cdsEnd)
class SubSeq(object):
    "subsequence of one Seq within an alignment block"
    __slots__ = ("seq", "start", "end", "cds")

    def __init__(self, seq, start, end, cds=None):
        self.seq = seq
        self.start = start
        self.end = end
        self.cds = cds

    def __getstate__(self):
        return (self.seq, self.start, self.end, self.cds)

    def __setstate__(self, st):
        (self.seq, self.start, self.end, self.cds) = st

    def copy(self, destSeq):
        "create a copy, associating with new Seq object"
        return SubSeq(destSeq, self.start, self.end,
                      copy.copy(self.cds))

    def __str__(self):
        s = "%s-%s" % (self.start, self.end)
        if self.cds is not None:
            s += " CDS: " + str(self.cds)
        return s

    def locStr(self):
        "get string describing location"
        return "%s:%s-%s" % (self.seq.seqId, self.start, self.end)

    def revCmpl(self, revSeq):
        "return the reverse-complement of this object for revSeq"
        revCds = None if self.cds is None else self.cds.revCmpl(revSeq.size)
        return SubSeq(revSeq, revSeq.size - self.end, revSeq.size - self.start,
                      revCds)

    def __len__(self):
        "number of bases covered"
        return self.end - self.start

    def overlaps(self, start, end):
        "determine if this subseq overlaps the given range"
        return max(self.start, start) < min(self.end, end)

    def updateCds(self, cdsStart, cdsEnd):
        """Update the CDS bounds and the sequence CDS. This expands existing
        CDS, it doesn't replace it. If cds range exceeds block bounds,
        it is adjusted"""
        cdsStart = max(self.start, cdsStart)
        cdsEnd = min(self.end, cdsEnd)
        if self.cds is None:
            self.cds = Cds(cdsStart, cdsEnd)
        else:
            self.cds.start = min(self.cds.start, cdsStart)
            self.cds.end = max(self.cds.end, cdsEnd)
        # keep the parent sequence's CDS in sync
        self.seq.updateCds(self.cds.start, self.cds.end)
class SubSeqs(list):
    """List of the SubSeq objects for one side of an alignment's blocks; an
    entry is None where that side is unaligned.  Lets functions operate on
    either side of an alignment uniformly."""
    __slots__ = ("seq")

    def __init__(self, seq):
        list.__init__(self)
        self.seq = seq  # the Seq this side describes

    def __getstate__(self):
        return (self.seq,)

    def __setstate__(self, st):
        (self.seq,) = st

    def clearCds(self):
        "clear CDS in the sequence and in every aligned subseq"
        self.seq.cds = None
        for ss in self:
            if ss is not None:
                ss.cds = None

    def findFirstCdsIdx(self):
        "find index of the first SubSeq with CDS, or None if there is none"
        # enumerate() instead of Python2-only xrange(): works on 2 and 3
        for i, ss in enumerate(self):
            if (ss is not None) and (ss.cds is not None):
                return i
        return None

    def findLastCdsIdx(self):
        # BUG FIX: docstring said "first"; this scans from the end.
        "find index of the last SubSeq with CDS, or None if there is none"
        for i in range(len(self) - 1, -1, -1):
            if (self[i] is not None) and (self[i].cds is not None):
                return i
        return None
class Cds(object):
    "range or subrange of CDS"
    __slots__ = ("start", "end")

    def __init__(self, start, end):
        # a CDS range must be non-empty
        assert(start < end)
        self.start = start
        self.end = end

    def __getstate__(self):
        return (self.start, self.end)

    def __setstate__(self, st):
        (self.start, self.end) = st

    def revCmpl(self, psize):
        "return the reverse-complement range within a parent of size psize"
        return Cds(psize - self.end, psize - self.start)

    def __str__(self):
        return "%s-%s" % (self.start, self.end)

    def __len__(self):
        "length of the CDS range"
        return self.end - self.start
class Block(object):
    """One block of a pairwise alignment.  Either side (q or t) may be None
    for an unaligned region; prev/next link neighboring blocks for simple
    traversal."""
    __slots__ = ("aln", "q", "t", "prev", "next")

    def __init__(self, aln, q, t):
        # aligned blocks must have equal-length sides
        assert((q is None) or (t is None) or (len(q) == len(t)))
        self.aln = aln
        self.q = q
        self.t = t
        # FIXME: remove???
        self.prev = self.next = None

    def __getstate__(self):
        return (self.aln, self.q, self.t, self.prev, self.next)

    def __setstate__(self, st):
        (self.aln, self.q, self.t, self.prev, self.next) = st

    def __len__(self):
        "length of block"
        if self.q is not None:
            return len(self.q)
        return len(self.t)

    def __str__(self):
        return "%s <=> %s" % (self.q, self.t)

    def isAln(self):
        "is this block aligned on both sides?"
        return not ((self.q is None) or (self.t is None))

    def isQIns(self):
        "is this block a query insert?"
        return (self.t is None) and (self.q is not None)

    def isTIns(self):
        "is this block a target insert?"
        return (self.t is not None) and (self.q is None)

    @staticmethod
    def __subToRow(seq, sub):
        if sub is None:
            return [seq.seqId, None, None]
        return [seq.seqId, sub.start, sub.end]

    def toRow(self):
        "convert to list of query and target coords"
        row = self.__subToRow(self.aln.qSeq, self.q)
        row += self.__subToRow(self.aln.tSeq, self.t)
        return row

    def dump(self, fh):
        "print content to file"
        prLine(fh, "\t> query: ", self.q)
        prLine(fh, "\t target: ", self.t)
class PairAlign(list):
    """List of alignment Block objects for a query/target sequence pair,
    plus per-side SubSeqs views of the blocks.

    Fixes in this revision:
    - mapCds() called __mapCdsForOverlap in BOTH branches, silently
      ignoring contained=True.
    - __mapCdsForContained() invoked findFirstCdsIdx/findLastCdsIdx as
      PairAlign attributes; they are SubSeqs methods (AttributeError).
    - xrange() replaced with range() (Python 2/3 portable).
    """
    __slots__ = ("qSeq", "tSeq", "qSubSeqs", "tSubSeqs")

    def __init__(self, qSeq, tSeq):
        list.__init__(self)
        self.qSeq = qSeq
        self.tSeq = tSeq
        self.qSubSeqs = SubSeqs(qSeq)
        self.tSubSeqs = SubSeqs(tSeq)

    def __getstate__(self):
        return (self.qSeq, self.tSeq, self.qSubSeqs, self.tSubSeqs)

    def __setstate__(self, st):
        (self.qSeq, self.tSeq, self.qSubSeqs, self.tSubSeqs) = st

    def copy(self):
        "make a deep copy of this object"
        # N.B. can't use copy.deepcopy for whole object, since there are
        # circular links
        daln = PairAlign(self.qSeq.copy(), self.tSeq.copy())
        for sblk in self:
            daln.addBlk(sblk.q.copy(daln.qSeq) if (sblk.q is not None) else None,
                        sblk.t.copy(daln.tSeq) if (sblk.t is not None) else None)
        return daln

    def addBlk(self, q, t):
        "append a Block for SubSeqs q/t, maintaining prev/next links"
        blk = Block(self, q, t)
        if len(self) > 0:
            self[-1].next = blk
            blk.prev = self[-1]
        self.append(blk)
        self.qSubSeqs.append(blk.q)
        self.tSubSeqs.append(blk.t)
        return blk

    def revCmpl(self):
        "return a reverse complement of this object"
        qSeq = self.qSeq.revCmpl()
        tSeq = self.tSeq.revCmpl()
        aln = PairAlign(qSeq, tSeq)
        # blocks are emitted in reverse so coordinates remain ascending
        for i in range(len(self) - 1, -1, -1):
            blk = self[i]
            aln.addBlk((None if (blk.q is None) else blk.q.revCmpl(qSeq)),
                       (None if (blk.t is None) else blk.t.revCmpl(tSeq)))
        return aln

    def anyTOverlap(self, other):
        "determine if the any target blocks overlap"
        if (self.tSeq.seqId != other.tSeq.seqId) or (self.tSeq.strand != other.tSeq.strand):
            return False
        oblk = other[0]
        for blk in self:
            if blk.isAln():
                while oblk is not None:
                    if oblk.isAln():
                        if oblk.t.start > blk.t.end:
                            return False
                        elif blk.t.overlaps(oblk.t.start, oblk.t.end):
                            return True
                    oblk = oblk.next
                # NOTE(review): the while loop above only falls through when
                # oblk is None, so this always breaks after the first aligned
                # blk; kept as-is to preserve behavior -- confirm intent.
                if oblk is None:
                    break
        return False

    @staticmethod
    def __projectBlkCds(srcSs, destSs, contained):
        "project CDS from one subseq to another"
        if destSs is not None:
            if (srcSs is not None) and (srcSs.cds is not None):
                # map the CDS offset within the src block onto the dest block
                start = destSs.start + (srcSs.cds.start - srcSs.start)
                end = destSs.start + (srcSs.cds.end - srcSs.start)
                destSs.updateCds(start, end)
            elif contained:
                # no src CDS here, but block lies between CDS-bearing blocks
                destSs.updateCds(destSs.start, destSs.end)

    @staticmethod
    def __projectCds(srcSubSeqs, destSubSeqs, contained):
        "project CDS from one alignment side to the other"
        assert(srcSubSeqs.seq.cds is not None)
        destSubSeqs.clearCds()
        for i in range(srcSubSeqs.findFirstCdsIdx(), srcSubSeqs.findLastCdsIdx() + 1, 1):
            PairAlign.__projectBlkCds(srcSubSeqs[i], destSubSeqs[i], contained)

    def targetOverlap(self, o):
        "do the target ranges overlap"
        return ((self.tSeq.seqId == o.tSeq.seqId)
                and (self.tSeq.strand == o.tSeq.strand)
                and (self.tSeq.start < o.tSeq.end)
                and (self.tSeq.end > o.tSeq.start))

    def projectCdsToTarget(self, contained=False):
        """project CDS from query to target. If contained is True, assign CDS
        to subseqs between beginning and end of projected CDS"""
        PairAlign.__projectCds(self.qSubSeqs, self.tSubSeqs, contained)

    def projectCdsToQuery(self, contained=False):
        """project CDS from target to query. If contained is True, assign CDS
        to subseqs between beginning and end of projected CDS"""
        PairAlign.__projectCds(self.tSubSeqs, self.qSubSeqs, contained)

    def getSubseq(self, seq):
        "find the corresponding subSeq array"
        if seq == self.qSeq:
            return self.qSubSeqs
        elif seq == self.tSeq:
            return self.tSubSeqs
        else:
            raise Exception("seq is not part of this alignment")

    @staticmethod
    def __mapCdsToSubSeq(destSs, srcSubSeqs, si):
        "find overlapping src blks and assign cds, incrementing si as needed"
        sl = len(srcSubSeqs)
        lastSi = si
        while si < sl:
            srcSs = srcSubSeqs[si]
            if srcSs is not None:
                if (srcSs.cds is not None) and destSs.overlaps(srcSs.cds.start, srcSs.cds.end):
                    destSs.updateCds(srcSs.cds.start, srcSs.cds.end)
                elif destSs.start > srcSs.end:
                    # src blocks are sorted; nothing further can overlap
                    break
                lastSi = si
            si += 1
        return lastSi

    @staticmethod
    def __mapCdsForOverlap(srcSubSeqs, destSubSeqs):
        "map CDS only to block overlapping src in common sequence"
        si = di = 0
        sl = len(srcSubSeqs)
        dl = len(destSubSeqs)
        while (si < sl) and (di < dl):
            if destSubSeqs[di] is not None:
                si = PairAlign.__mapCdsToSubSeq(destSubSeqs[di], srcSubSeqs, si)
            di += 1

    @staticmethod
    def __mapCdsForContained(srcSubSeqs, destSubSeqs):
        "assign CDS for all blks contained in srcSubSeq CDS range"
        # BUG FIX: findFirstCdsIdx/findLastCdsIdx are SubSeqs methods; they
        # were wrongly invoked as PairAlign attributes.
        cdsStart = srcSubSeqs[srcSubSeqs.findFirstCdsIdx()].cds.start
        cdsEnd = srcSubSeqs[srcSubSeqs.findLastCdsIdx()].cds.end
        for destSs in destSubSeqs:
            if (destSs is not None) and destSs.overlaps(cdsStart, cdsEnd):
                destSs.updateCds(max(cdsStart, destSs.start),
                                 min(cdsEnd, destSs.end))

    def mapCds(self, srcAln, srcSeq, destSeq, contained=False):
        """map CDS from one alignment to this one via a common sequence.
        If contained is True, assign CDS to subseqs between beginning and
        end of mapped CDS"""
        assert(srcSeq.cds is not None)
        assert((destSeq == self.qSeq) or (destSeq == self.tSeq))
        assert((srcSeq.seqId == destSeq.seqId) and (srcSeq.strand == destSeq.strand))
        srcSubSeqs = srcAln.getSubseq(srcSeq)
        destSubSeqs = self.getSubseq(destSeq)
        destSubSeqs.clearCds()
        # BUG FIX: both branches previously called __mapCdsForOverlap, so
        # contained=True was silently ignored.
        if contained:
            PairAlign.__mapCdsForContained(srcSubSeqs, destSubSeqs)
        else:
            PairAlign.__mapCdsForOverlap(srcSubSeqs, destSubSeqs)

    def clearCds(self):
        "clear both query and target CDS, if any"
        if self.qSeq.cds is not None:
            self.qSubSeqs.clearCds()
            self.qSeq.cds = None
        if self.tSeq.cds is not None:
            self.tSubSeqs.clearCds()
            self.tSeq.cds = None

    def dump(self, fh):
        "print content to file"
        prLine(fh, "query: ", self.qSeq)
        prLine(fh, "target: ", self.tSeq)
        for blk in self:
            blk.dump(fh)
def _getCds(cdsRange, strand, size):
    "make a Cds object from a (start, end) tuple, flipped for the '-' strand"
    if cdsRange is None:
        return None
    cds = Cds(cdsRange[0], cdsRange[1])
    return cds.revCmpl(size) if (strand == '-') else cds
def _mkPslSeq(name, start, end, size, strand, cds=None):
    "make a Seq from a PSL q or t side, reversing the range on the '-' strand"
    if strand != '-':
        return Seq(name, start, end, size, strand, cds)
    return Seq(name, size - end, size - start, size, strand, cds)
def _addPslBlk(pslBlk, aln, prevBlk, inclUnaln):
    """add one aligned block to aln; when inclUnaln is set, first emit
    insert blocks for the unaligned gaps since the previous block"""
    qStart, qEnd = pslBlk.qStart, pslBlk.qEnd
    tStart, tEnd = pslBlk.tStart, pslBlk.tEnd
    if inclUnaln and (prevBlk is not None):
        if prevBlk.q.end < qStart:
            aln.addBlk(aln.qSeq.mkSubSeq(prevBlk.q.end, qStart), None)
        if prevBlk.t.end < tStart:
            aln.addBlk(None, aln.tSeq.mkSubSeq(prevBlk.t.end, tStart))
    return aln.addBlk(aln.qSeq.mkSubSeq(qStart, qEnd),
                      aln.tSeq.mkSubSeq(tStart, tEnd))
def fromPsl(psl, qCdsRange=None, inclUnaln=False, projectCds=False, contained=False):
    """generate a PairAlign from a PSL.

    qCdsRange is None or a (start, end) tuple on the query.  If inclUnaln is
    True, Block objects are also created for unaligned regions.  If projectCds
    is True and a query CDS is given, it is projected onto the target;
    contained is passed through to projectCdsToTarget."""
    qCds = _getCds(qCdsRange, psl.getQStrand(), psl.qSize)
    qSeq = _mkPslSeq(psl.qName, psl.qStart, psl.qEnd, psl.qSize, psl.getQStrand(), qCds)
    tSeq = _mkPslSeq(psl.tName, psl.tStart, psl.tEnd, psl.tSize, psl.getTStrand())
    aln = PairAlign(qSeq, tSeq)
    prevBlk = None
    # range() instead of Python2-only xrange() keeps this 2/3 portable
    for i in range(psl.blockCount):
        prevBlk = _addPslBlk(psl.blocks[i], aln, prevBlk, inclUnaln)
    if projectCds and (aln.qSeq.cds is not None):
        aln.projectCdsToTarget(contained)
    return aln
class CdsTable(dict):
    """Table mapping sequence ids to zero-based (start, end) CDS tuples,
    loaded from a tab-separated file of lines of the form:
        id<TAB>start..end
    (coordinates in the file are converted by subtracting one)."""

    __parseRe = re.compile("^([^\t]+)\t([0-9]+)\\.\\.([0-9]+)$")

    def __init__(self, cdsFile):
        dict.__init__(self)
        for line in iterLines(cdsFile):
            # lines beginning with '#' are comments
            if not line.startswith('#'):
                self.__parseCds(line)

    def __parseCds(self, line):
        m = self.__parseRe.match(line)
        if m is None:
            raise Exception("can't parse CDS line: " + line)
        # shift both coordinates down by one
        self[m.group(1)] = (int(m.group(2)) - 1, int(m.group(3)) - 1)
def loadPslFile(pslFile, cdsFile=None, inclUnaln=False, projectCds=False, contained=False):
    "build a list of PairAlign objects from a PSL file and an optional CDS file"
    cdsTbl = CdsTable(cdsFile) if (cdsFile is not None) else {}
    return [fromPsl(psl, cdsTbl.get(psl.qName), inclUnaln, projectCds, contained)
            for psl in PslReader(pslFile)]
| |
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Performance test for the Oppia reader view.
Before running this script, exploration 0 should be loaded in the target
server.
Run this script from the Oppia root directory:
python core/tests/reader_view_load_test.py --thread_count=5 --start_uid=1 \
https://my-oppia-instance.appspot.com
"""
__author__ = 'Sean Lip (sll@google.com)'
import argparse
import cookielib
import json
import logging
import sys
import threading
import time
import urllib
import urllib2
# NOTE(review): appears to be the standard anti-XSSI prefix stripped from
# JSON responses; its consumer is outside this chunk -- confirm.
XSSI_PREFIX = ')]}\'\n'
# command line arguments parser
PARSER = argparse.ArgumentParser()
PARSER.add_argument(
    'base_url', help=('Base URL of the Oppia installation to test'), type=str)
PARSER.add_argument(
    '--start_uid',
    help='Initial value for unique thread identifier.', default=1, type=int)
PARSER.add_argument(
    '--thread_count',
    help='Number of concurrent threads for executing the test.',
    default=1, type=int)
PARSER.add_argument(
    '--iteration_count',
    help='Number of iterations for executing the test. Each thread of each '
    'iteration acts as a unique user with the uid equal to:'
    'start_uid + thread_count * iteration_index.',
    default=1, type=int)
def assert_contains(needle, haystack):
    """Raise if needle does not occur in haystack."""
    if needle not in haystack:
        # BUG FIX: Exception() does not %-format its arguments the way
        # logging does; interpolate explicitly so the message is readable.
        raise Exception('Expected to find term: %s\n%s' % (needle, haystack))
def assert_does_not_contain(needle, haystack):
    """Raise if needle occurs in haystack."""
    if needle in haystack:
        # BUG FIX: interpolate instead of passing logging-style args, which
        # Exception() would stash unformatted.
        raise Exception(
            'Did not expect to find term: %s\n%s' % (needle, haystack))
def assert_equals(expected, actual):
    """Raise if expected != actual."""
    if expected != actual:
        # BUG FIX: interpolate instead of passing logging-style args, which
        # Exception() would stash unformatted.
        raise Exception('Expected equality of %s and %s.' % (expected, actual))
class WebSession(object):
    """A class that allows navigation of web pages keeping cookie session."""
    # Guards the class-level counters/histogram shared by all threads.
    PROGRESS_LOCK = threading.Lock()
    # Retry policy for "soft" (instance-starvation) HTTP 500 errors.
    MAX_RETRIES = 3
    RETRY_SLEEP_SEC = 3
    # Aggregate request statistics across every WebSession instance.
    GET_COUNT = 0
    POST_COUNT = 0
    RETRY_COUNT = 0
    # Emit a progress log line every PROGRESS_BATCH requests.
    PROGRESS_BATCH = 10
    # Buckets filled by update_duration(): >30s, >15s, >7s, >3s, >1s, <=1s.
    RESPONSE_TIME_HISTOGRAM = [0, 0, 0, 0, 0, 0]
    def __init__(self, uid, common_headers=None):
        # uid identifies the simulated user; common_headers are attached to
        # every GET request issued by this session.
        if common_headers is None:
            common_headers = {}
        self.uid = uid
        self.common_headers = common_headers
        self.cj = cookielib.CookieJar()
        # Opener with a cookie processor so the session cookie persists.
        self.opener = urllib2.build_opener(
            urllib2.HTTPCookieProcessor(self.cj))
    @classmethod
    def increment_duration_bucket(cls, index):
        cls.RESPONSE_TIME_HISTOGRAM[index] += 1
    @classmethod
    def update_duration(cls, duration):
        # Record a request duration (seconds) into its histogram bucket.
        if duration > 30:
            cls.increment_duration_bucket(0)
        elif duration > 15:
            cls.increment_duration_bucket(1)
        elif duration > 7:
            cls.increment_duration_bucket(2)
        elif duration > 3:
            cls.increment_duration_bucket(3)
        elif duration > 1:
            cls.increment_duration_bucket(4)
        else:
            cls.increment_duration_bucket(5)
    @classmethod
    def log_progress(cls, force=False):
        # Log stats every PROGRESS_BATCH requests, or whenever forced.
        update = ((cls.GET_COUNT + cls.POST_COUNT) % (
            cls.PROGRESS_BATCH) == 0)
        if update or force:
            logging.info(
                'GET/POST:[%s, %s], RETRIES:[%s], SLA:%s',
                cls.GET_COUNT, cls.POST_COUNT, cls.RETRY_COUNT,
                cls.RESPONSE_TIME_HISTOGRAM)
    def get_cookie_value(self, name):
        # Linear scan of the jar; returns None when the cookie is absent.
        for cookie in self.cj:
            if cookie.name == name:
                return cookie.value
        return None
    def is_soft_error(self, http_error):
        """Checks if HTTPError is due to starvation of frontend instances."""
        body = http_error.fp.read()
        # this is the text specific to the front end instance starvation, which
        # is a retriable error for both GET and POST; normal HTTP error 500 has
        # this specific text '<h1>500 Internal Server Error</h1>'
        if http_error.code == 500 and '<h1>Error: Server Error</h1>' in body:
            return True
        logging.error(
            'Non-retriable HTTP %s error:\n%s', http_error.code, body)
        return False
    def open(self, request, hint):
        """Executes any HTTP request."""
        start_time = time.time()
        try:
            try_count = 0
            while True:
                try:
                    return self.opener.open(request)
                except urllib2.HTTPError as he:
                    # Retry soft errors up to MAX_RETRIES, sleeping between
                    # attempts; any other HTTPError propagates immediately.
                    if (try_count < WebSession.MAX_RETRIES and
                            self.is_soft_error(he)):
                        try_count += 1
                        with WebSession.PROGRESS_LOCK:
                            WebSession.RETRY_COUNT += 1
                        time.sleep(WebSession.RETRY_SLEEP_SEC)
                        continue
                    raise he
        except Exception as e:
            logging.info(
                'Error in session %s executing: %s', self.uid, hint)
            raise e
        finally:
            # Always record total wall time, retries included.
            with WebSession.PROGRESS_LOCK:
                self.update_duration(time.time() - start_time)
    def get(self, url, expected_code=200):
        """HTTP GET."""
        with WebSession.PROGRESS_LOCK:
            WebSession.GET_COUNT += 1
            self.log_progress()
        request = urllib2.Request(url)
        for key, value in self.common_headers.items():
            request.add_header(key, value)
        response = self.open(request, 'GET %s' % url)
        assert_equals(expected_code, response.code)
        return response.read()
    def post(self, url, args_dict, expected_code=200):
        """HTTP POST."""
        with WebSession.PROGRESS_LOCK:
            WebSession.POST_COUNT += 1
            self.log_progress()
        data = urllib.urlencode(args_dict)
        request = urllib2.Request(url, data)
        response = self.open(request, 'POST %s' % url)
        assert_equals(expected_code, response.code)
        return response.read()
class TaskThread(threading.Thread):
    """Runs a task in a separate thread, recording any raised exception.

    check_all_tasks() inspects ``task.exception`` after joining each thread.
    """

    def __init__(self, func, name=None):
        super(TaskThread, self).__init__()
        self.func = func
        # Set by run() when func() raises; read by check_all_tasks().
        self.exception = None
        # Full (type, value, traceback) triple of the failure, if any.
        self.exc_info = None
        self.name = name

    @classmethod
    def start_all_tasks(cls, tasks):
        """Starts all tasks."""
        for task in tasks:
            task.start()

    @classmethod
    def check_all_tasks(cls, tasks):
        """Checks results of all tasks; fails if any task raised."""
        failed_count = 0
        for task in tasks:
            while True:
                # Timeouts should happen after 30 seconds; wake periodically
                # so long waits are logged.
                task.join(30)
                # BUG FIX: isAlive() was removed in Python 3.9; is_alive()
                # exists on both Python 2.6+ and 3.
                if task.is_alive():
                    logging.info('Still waiting for: %s.', task.name)
                    continue
                else:
                    break
            if task.exception:
                failed_count += 1
        if failed_count:
            # BUG FIX: format the count into the message instead of passing
            # it as an unused Exception arg.
            raise Exception('Tasks failed: %s' % failed_count)

    @classmethod
    def execute_task_list(cls, tasks):
        """Starts all tasks and checks the results."""
        cls.start_all_tasks(tasks)
        cls.check_all_tasks(tasks)

    def run(self):
        try:
            self.func()
        except Exception as e:  # pylint: disable-msg=broad-except
            logging.error('Error in %s: %s', self.name, e)
            # BUG FIX: record the failure so check_all_tasks() can detect it;
            # previously only exc_info was stored but .exception was read.
            self.exception = e
            self.exc_info = sys.exc_info()
            # Bare re-raise preserves the original traceback and is valid on
            # both Python 2 and 3 (the py2-only 3-arg raise was not).
            raise
class ReaderViewLoadTest(object):
    """A reader view load test.

    Simulates one reader walking through the demo exploration: loads the
    player page, then submits a fixed sequence of answers, asserting on the
    expected responses at each step.
    """

    def __init__(self, base_url, uid):
        self.uid = uid
        self.host = base_url
        # State of the in-progress exploration; populated by init_player().
        self.exp_id = None
        self.last_state_name = None
        self.last_params = None
        self.state_history = None
        self.session = WebSession(uid=uid)

    def run(self):
        """Plays the scripted conversation for exploration '0' end to end."""
        self.init_player(
            '0', 'Welcome to Oppia!', 'do you know where the name \'Oppia\'')
        self.submit_and_compare(
            '0', 'In fact, the word Oppia means \'learn\'.')
        self.submit_and_compare('Finish', 'Check your spelling!')
        self.submit_and_compare(
            'Finnish', 'Yes! Oppia is the Finnish word for learn.')

    def _get(self, url):
        return self.session.get(url)

    def _get_json(self, url):
        """Get a JSON response, transformed to a Python object."""
        json_body = self.session.get(url)
        # XSSI_PREFIX is defined earlier in this file; the server prepends it
        # to JSON responses to defeat cross-site script inclusion.
        if not json_body.startswith(XSSI_PREFIX):
            raise Exception('Expected an XSSI prefix; found none.')
        return json.loads(json_body[len(XSSI_PREFIX):])

    def _post(self, url, data):
        return self.session.post(url, data)

    def _post_json(self, url, data):
        """Post a JSON request, returning the response as a Python object."""
        json_body = self.session.post(str(url), {'payload': json.dumps(data)})
        if not json_body.startswith(XSSI_PREFIX):
            raise Exception('Expected an XSSI prefix; found none.')
        return json.loads(json_body[len(XSSI_PREFIX):])

    def init_player(self, exploration_id, expected_title, expected_response):
        """Loads the exploration page and its init handler, asserting that the
        title and initial response match; seeds the local playthrough state."""
        self.exp_id = exploration_id
        body = self._get('%s/explore/%s' % (self.host, self.exp_id))
        assert_contains('Learn', body)
        assert_contains('Return to the gallery', body)
        body = self._get_json(
            '%s/explorehandler/init/%s' % (self.host, self.exp_id))
        assert_equals(body['title'], expected_title)
        assert_contains(expected_response, body['init_html'])
        self.last_state_name = body['state_name']
        self.last_params = body['params']
        self.state_history = [self.last_state_name]

    def submit_and_compare(self, answer, expected_response):
        """Submits *answer* for the current state and asserts the reply HTML
        contains *expected_response*; advances the tracked state history."""
        url = '%s/explorehandler/transition/%s/%s' % (
            self.host, self.exp_id, urllib.quote(self.last_state_name))
        body = self._post_json(url, {
            'answer': answer, 'params': self.last_params,
            'state_history': self.state_history,
        })
        assert_contains(expected_response, body['oppia_html'])
        self.last_state_name = body['state_name']
        self.last_params = body['params']
        self.state_history += [self.last_state_name]
def run_all(args):
    """Runs test scenario in multiple threads.

    args: parsed command-line namespace with base_url, start_uid,
    thread_count and iteration_count attributes.
    """
    if args.thread_count < 1 or args.thread_count > 256:
        raise Exception('Please use between 1 and 256 threads.')
    if not args.base_url:
        raise Exception('Please specify a base URL to load-test against.')

    started_at = time.time()
    logging.info('Started testing: %s', args.base_url)
    logging.info('base_url: %s', args.base_url)
    logging.info('start_uid: %s', args.start_uid)
    logging.info('thread_count: %s', args.thread_count)
    logging.info('iteration_count: %s', args.iteration_count)
    logging.info('SLAs are [>30s, >15s, >7s, >3s, >1s, <1s]')

    try:
        for iteration_index in range(0, args.iteration_count):
            logging.info('Started iteration: %s', iteration_index)
            # Align the progress-log cadence with one batch per iteration.
            WebSession.PROGRESS_BATCH = args.thread_count
            uid_base = (args.start_uid +
                        iteration_index * args.thread_count)
            tasks = [
                TaskThread(
                    ReaderViewLoadTest(args.base_url, uid_base + index).run,
                    name='ReaderViewLoadTest-%s' % index)
                for index in range(0, args.thread_count)]
            try:
                TaskThread.execute_task_list(tasks)
            except Exception as e:
                logging.info('Failed iteration: %s', iteration_index)
                raise e
    finally:
        # Always emit the final counters, even on failure.
        WebSession.log_progress(force=True)
    logging.info('Done! Duration (s): %s', time.time() - started_at)
if __name__ == '__main__':
    # Configure root logging before any progress output is emitted.
    logging.basicConfig(level=logging.INFO)
    # PARSER is the module-level argparse parser defined earlier in the file.
    run_all(PARSER.parse_args())
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
import tempfile
from collections import defaultdict, namedtuple
from pants.backend.codegen.thrift.java.java_thrift_library import JavaThriftLibrary
from pants.backend.codegen.thrift.java.thrift_defaults import ThriftDefaults
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TargetDefinitionException, TaskError
from pants.build_graph.address import Address
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.task.simple_codegen_task import SimpleCodegenTask
from pants.util.dirutil import safe_mkdir, safe_open
from pants.util.memo import memoized_method, memoized_property
from twitter.common.collections import OrderedSet
from pants.contrib.scrooge.tasks.java_thrift_library_fingerprint_strategy import \
JavaThriftLibraryFingerprintStrategy
from pants.contrib.scrooge.tasks.thrift_util import calculate_compile_sources
_RPC_STYLES = frozenset(['sync', 'finagle', 'ostrich'])
class ScroogeGen(SimpleCodegenTask, NailgunTask):
  """Generates Java/Scala sources from Thrift IDL via the Scrooge compiler."""

  # Per-language dependency buckets: targets to add for thrift files that
  # declare services vs. plain structs (see _thrift_dependencies_for_target).
  DepInfo = namedtuple('DepInfo', ['service', 'structs'])
  # Hashable summary of one Scrooge invocation's configuration.
  PartialCmd = namedtuple('PartialCmd', [
      'language',
      'namespace_map',
      'default_java_namespace',
      'include_paths',
      'compiler_args'
  ])

  @classmethod
  def register_options(cls, register):
    """Registers this task's command-line/config options and the JVM tool."""
    super(ScroogeGen, cls).register_options(register)
    register('--verbose', type=bool, help='Emit verbose output.')
    register('--strict', fingerprint=True, type=bool,
             help='Enable strict compilation.')
    register('--service-deps', default={}, advanced=True, type=dict,
             help='A map of language to targets to add as dependencies of '
                  'synthetic thrift libraries that contain services.')
    register('--structs-deps', default={}, advanced=True, type=dict,
             help='A map of language to targets to add as dependencies of '
                  'synthetic thrift libraries that contain structs.')
    register('--target-types',
             default={'scala': 'scala_library', 'java': 'java_library', 'android': 'java_library'},
             advanced=True,
             type=dict,
             help='Registered target types.')
    cls.register_jvm_tool(register, 'scrooge-gen')

  @classmethod
  def subsystem_dependencies(cls):
    return super(ScroogeGen, cls).subsystem_dependencies() + (ThriftDefaults,)

  @classmethod
  def product_types(cls):
    return ['java', 'scala']

  @classmethod
  def implementation_version(cls):
    # Bump the trailing integer to invalidate previously cached results.
    return super(ScroogeGen, cls).implementation_version() + [('ScroogeGen', 3)]

  @classmethod
  def get_fingerprint_strategy(cls):
    return JavaThriftLibraryFingerprintStrategy(ThriftDefaults.global_instance())

  def __init__(self, *args, **kwargs):
    super(ScroogeGen, self).__init__(*args, **kwargs)
    self._thrift_defaults = ThriftDefaults.global_instance()
    self._depinfo = None

  # TODO(benjy): Use regular os-located tmpfiles, as we do everywhere else.
  def _tempname(self):
    """Creates an empty temp file under the workdir and returns its path."""
    # don't assume the user's cwd is buildroot
    pants_workdir = self.get_options().pants_workdir
    tmp_dir = os.path.join(pants_workdir, 'tmp')
    safe_mkdir(tmp_dir)
    fd, path = tempfile.mkstemp(dir=tmp_dir, prefix='')
    os.close(fd)
    return path

  def _resolve_deps(self, depmap):
    """Given a map of gen-key=>target specs, resolves the target specs into references."""
    deps = defaultdict(lambda: OrderedSet())
    for category, depspecs in depmap.items():
      dependencies = deps[category]
      for depspec in depspecs:
        dep_address = Address.parse(depspec)
        try:
          self.context.build_graph.maybe_inject_address_closure(dep_address)
          dependencies.add(self.context.build_graph.get_target(dep_address))
        except AddressLookupError as e:
          raise AddressLookupError('{}\n  referenced from {} scope'.format(e, self.options_scope))
    return deps

  def _validate_language(self, target):
    """Returns the target's language, failing if it is not registered."""
    language = self._thrift_defaults.language(target)
    if language not in self._registered_language_aliases():
      raise TargetDefinitionException(
          target,
          'language {} not supported: expected one of {}.'.format(language, self._registered_language_aliases().keys()))
    return language

  def _validate_rpc_style(self, target):
    """Returns the target's rpc_style, failing if it is not a known style."""
    rpc_style = self._thrift_defaults.rpc_style(target)
    if rpc_style not in _RPC_STYLES:
      raise TargetDefinitionException(
          target,
          'rpc_style {} not supported: expected one of {}.'.format(rpc_style, _RPC_STYLES))
    return rpc_style

  @memoized_method
  def _registered_language_aliases(self):
    return self.get_options().target_types

  @memoized_method
  def _target_type_for_language(self, language):
    """Maps a generation language to the unique registered target type."""
    alias_for_lang = self._registered_language_aliases()[language]
    registered_aliases = self.context.build_file_parser.registered_aliases()
    target_types = registered_aliases.target_types_by_alias.get(alias_for_lang, None)
    if not target_types:
      raise TaskError('Registered target type `{0}` for language `{1}` does not exist!'.format(alias_for_lang, language))
    if len(target_types) > 1:
      raise TaskError('More than one target type registered for language `{0}`'.format(language))
    return next(iter(target_types))

  def execute_codegen(self, target, target_workdir):
    """Validates *target* and runs Scrooge over its thrift sources."""
    self._validate_compiler_configs([target])
    self._must_have_sources(target)

    def compiler_args_has_rpc_style(compiler_args):
      return "--finagle" in compiler_args or "--ostrich" in compiler_args

    def merge_rpc_style_with_compiler_args(compiler_args, rpc_style):
      # Translates the legacy rpc_style setting into Scrooge flags.
      new_compiler_args = list(compiler_args)
      # ignore rpc_style if we think compiler_args is setting it
      if not compiler_args_has_rpc_style(compiler_args):
        if rpc_style == 'ostrich':
          new_compiler_args.append('--finagle')
          new_compiler_args.append('--ostrich')
        elif rpc_style == 'finagle':
          new_compiler_args.append('--finagle')
      return new_compiler_args

    namespace_map = self._thrift_defaults.namespace_map(target)
    compiler_args = merge_rpc_style_with_compiler_args(
      self._thrift_defaults.compiler_args(target),
      self._validate_rpc_style(target))
    # namespace_map is sorted so the PartialCmd is deterministic/hashable.
    partial_cmd = self.PartialCmd(
        language=self._validate_language(target),
        namespace_map=tuple(sorted(namespace_map.items())) if namespace_map else (),
        default_java_namespace=self._thrift_defaults.default_java_namespace(target),
        include_paths=target.include_paths,
        compiler_args=compiler_args)
    self.gen(partial_cmd, target, target_workdir)

  def gen(self, partial_cmd, target, target_workdir):
    """Builds the Scrooge command line and invokes it via nailgun."""
    import_paths, _ = calculate_compile_sources([target], self.is_gentarget)

    args = list(partial_cmd.compiler_args)

    if partial_cmd.default_java_namespace:
      args.extend(['--default-java-namespace', partial_cmd.default_java_namespace])

    for import_path in import_paths:
      args.extend(['--import-path', import_path])

    args.extend(['--language', partial_cmd.language])

    for lhs, rhs in partial_cmd.namespace_map:
      args.extend(['--namespace-map', '%s=%s' % (lhs, rhs)])

    args.extend(['--dest', target_workdir])

    if not self.get_options().strict:
      args.append('--disable-strict')

    if partial_cmd.include_paths:
      for include_path in partial_cmd.include_paths:
        args.extend(['--include-path', include_path])

    if self.get_options().verbose:
      args.append('--verbose')

    gen_file_map_path = os.path.relpath(self._tempname())
    args.extend(['--gen-file-map', gen_file_map_path])

    args.extend(target.sources_relative_to_buildroot())

    classpath = self.tool_classpath('scrooge-gen')
    jvm_options = list(self.get_options().jvm_options)
    jvm_options.append('-Dfile.encoding=UTF-8')
    returncode = self.runjava(classpath=classpath,
                              main='com.twitter.scrooge.Main',
                              jvm_options=jvm_options,
                              args=args,
                              workunit_name='scrooge-gen')
    if 0 != returncode:
      raise TaskError('Scrooge compiler exited non-zero for {} ({})'.format(target, returncode))

  @staticmethod
  def _declares_exception(source):
    """True if the thrift *source* file declares an exception type."""
    # ideally we'd use more sophisticated parsing
    exception_parser = re.compile(r'^\s*exception\s+(?:[^\s{]+)')
    return ScroogeGen._has_declaration(source, exception_parser)

  @staticmethod
  def _declares_service(source):
    """True if the thrift *source* file declares a service."""
    # ideally we'd use more sophisticated parsing
    service_parser = re.compile(r'^\s*service\s+(?:[^\s{]+)')
    return ScroogeGen._has_declaration(source, service_parser)

  @staticmethod
  def _has_declaration(source, regex):
    """Scans *source* (path relative to the buildroot) line-by-line for
    *regex*."""
    source_path = os.path.join(get_buildroot(), source)
    with open(source_path) as thrift:
      return any(line for line in thrift if regex.search(line))

  def parse_gen_file_map(self, gen_file_map_path, outdir):
    """Parses Scrooge's 'src -> generated' map into {src: set(generated)}."""
    d = defaultdict(set)
    with safe_open(gen_file_map_path, 'r') as deps:
      for dep in deps:
        src, cls = dep.strip().split('->')
        src = os.path.relpath(src.strip())
        cls = os.path.relpath(cls.strip(), outdir)
        d[src].add(cls)
    return d

  def is_gentarget(self, target):
    if not isinstance(target, JavaThriftLibrary):
      return False
    # We only handle requests for 'scrooge' compilation and not, for example 'thrift', aka the
    # Apache thrift compiler
    return self._thrift_defaults.compiler(target) == 'scrooge'

  def _validate_compiler_configs(self, targets):
    """Checks that *target* and its whole thrift dependency tree agree on
    (language, rpc_style)."""
    assert len(targets) == 1, ("TODO: This method now only ever receives one target. Simplify.")
    ValidateCompilerConfig = namedtuple('ValidateCompilerConfig', ['language', 'rpc_style'])

    def compiler_config(tgt):
      # Note compiler is not present in this signature. At this time
      # Scrooge and the Apache thrift generators produce identical
      # java sources, and the Apache generator does not produce scala
      # sources. As there's no permutation allowing the creation of
      # incompatible sources with the same language+rpc_style we omit
      # the compiler from the signature at this time.
      return ValidateCompilerConfig(language=self._thrift_defaults.language(tgt),
                                    rpc_style=self._thrift_defaults.rpc_style(tgt))

    mismatched_compiler_configs = defaultdict(set)

    for target in filter(lambda t: isinstance(t, JavaThriftLibrary), targets):
      mycompilerconfig = compiler_config(target)

      def collect(dep):
        if mycompilerconfig != compiler_config(dep):
          mismatched_compiler_configs[target].add(dep)

      target.walk(collect, predicate=lambda t: isinstance(t, JavaThriftLibrary))

    if mismatched_compiler_configs:
      msg = ['Thrift dependency trees must be generated with a uniform compiler configuration.\n\n']
      for tgt in sorted(mismatched_compiler_configs.keys()):
        msg.append('%s - %s\n' % (tgt, compiler_config(tgt)))
        for dep in mismatched_compiler_configs[tgt]:
          msg.append('    %s - %s\n' % (dep, compiler_config(dep)))
      raise TaskError(''.join(msg))

  def _must_have_sources(self, target):
    if isinstance(target, JavaThriftLibrary) and not target.payload.sources.source_paths:
      raise TargetDefinitionException(target, 'no thrift files found')

  def synthetic_target_type(self, target):
    language = self._thrift_defaults.language(target)
    return self._target_type_for_language(language)

  def synthetic_target_extra_dependencies(self, target, target_workdir):
    deps = OrderedSet(self._thrift_dependencies_for_target(target))
    deps.update(target.dependencies)
    return deps

  def _thrift_dependencies_for_target(self, target):
    """Picks service-deps vs structs-deps based on what the sources declare.

    Exceptions take the service bucket because generated exception code
    shares the service runtime dependencies.
    """
    dep_info = self._resolved_dep_info
    target_declares_service_or_exception = any(self._declares_service(source) or self._declares_exception(source)
                                               for source in target.sources_relative_to_buildroot())
    language = self._thrift_defaults.language(target)

    if target_declares_service_or_exception:
      return dep_info.service[language]
    else:
      return dep_info.structs[language]

  @memoized_property
  def _resolved_dep_info(self):
    return ScroogeGen.DepInfo(self._resolve_deps(self.get_options().service_deps),
                              self._resolve_deps(self.get_options().structs_deps))

  @property
  def _copy_target_attributes(self):
    return ['provides', 'strict_deps', 'fatal_warnings']
| |
from __future__ import division
from percept.tasks.base import Task
from percept.tasks.train import Train
from percept.fields.base import Complex, List, Dict, Float
from inputs.inputs import SimpsonsFormats
from percept.utils.models import RegistryCategories, get_namespace
import logging
from percept.tests.framework import Tester
from percept.conf.base import settings
import re
import os
from matplotlib import pyplot
import numpy as np
from scikits.audiolab import Sndfile
from scikits.audiolab import oggread
import pandas as pd
from multiprocessing import Pool, TimeoutError
from sklearn.ensemble import RandomForestClassifier
import math
import random
from itertools import chain
log = logging.getLogger(__name__)
class LoadAudioFiles(Task):
    """Loads episode audio, extracts per-line features and trains/predicts
    speaker labels.

    NOTE(review): help_text says "Cleanup simpsons scripts." which looks
    copy-pasted from another task -- confirm.
    """

    # Final combined subtitle+feature DataFrame produced by predict().
    data = Complex()
    # Absolute paths of all discovered .ogg files.
    all_files = List()
    # SequentialValidate instance used for cross-validation.
    seq = Complex()
    # Trimmed CV results frame.
    res = Complex()
    # Mapping of label string -> integer code.
    label_codes = Dict()

    data_format = SimpsonsFormats.dataframe

    category = RegistryCategories.preprocessors
    namespace = get_namespace(__module__)

    help_text = "Cleanup simpsons scripts."

    args = {
        'audio_dir' : settings.AUDIO_DIR,
        'timeout' : 600,
        'only_labelled_lines' : settings.ONLY_LABELLED_LINES,
        'processed_files_limit' : settings.PROCESSED_FILES_LIMIT
    }

    def train(self, data, target, **kwargs):
        """
        Used in the training phase.  Override.
        """
        self.data = self.predict(data, **kwargs)

    def extract_season(self, name):
        """Parses (season, episode) out of a filename.

        Accepts '[SxE]' / '[S.E]' and 'S01E02' styles; returns (None, None)
        when neither pattern matches.
        """
        match1 = re.search('\[(\d+)[x\.](\d+)\]',name)
        if match1 is not None:
            season = match1.group(1)
            episode = match1.group(2)
            return int(season),int(episode)
        match2 = re.search('S(\d+)E(\d+)',name)
        if match2 is not None:
            season = match2.group(1)
            episode = match2.group(2)
            return int(season),int(episode)
        return None, None

    def predict(self, data, **kwargs):
        """
        Used in the predict phase, after training.  Override
        """
        # Worker pool for feature extraction; restart workers periodically to
        # bound memory growth.
        p = Pool(4, maxtasksperchild=50)
        audio_dir = kwargs['audio_dir']
        timeout = kwargs['timeout']
        oll = kwargs['only_labelled_lines']
        pff = kwargs['processed_files_limit']
        # Gather files one directory level deep plus loose files.
        all_files = []
        for ad in os.listdir(audio_dir):
            ad_path = os.path.abspath(os.path.join(audio_dir,ad))
            if os.path.isdir(ad_path):
                files = os.listdir(ad_path)
                all_files += [os.path.abspath(os.path.join(ad_path,f)) for f in files]
            else:
                all_files += [ad_path]
        self.all_files = [f for f in all_files if f.endswith(".ogg")]
        frames = []
        counter = 0
        for f in self.all_files:
            season,episode = self.extract_season(f)
            # S11E06 is skipped deliberately; reason not recorded -- confirm.
            if season is None or (season==11 and episode==6):
                continue
            # Subtitles for just this episode.
            subtitle_frame = data[((data['season']==season) & (data['episode']==episode))]
            if subtitle_frame.shape[0]==0:
                continue
            #To cause loop to end early, remove if needed
            if oll:
                label_frame = subtitle_frame[(subtitle_frame['label']!="")]
                if label_frame.shape[0]==0:
                    continue
            if pff is not None and isinstance(pff, int) and counter>=pff:
                break
            counter+=1
            log.info("On file {0} Season {1} Episode {2}".format(counter,season,episode))
            # Decode the whole episode; f_data is (samples, channels).
            f_data, fs, enc = oggread(f)
            subtitle_frame = subtitle_frame.sort('start')
            subtitle_frame.index = range(subtitle_frame.shape[0])
            samps = []
            good_rows = []
            for i in xrange(0,subtitle_frame.shape[0]):
                start = subtitle_frame['start'].iloc[i]
                end = subtitle_frame['end'].iloc[i]
                # Skip clips longer than 6s, and unlabelled clips when
                # only_labelled_lines is set.
                if end-start>6 or (subtitle_frame['label'][i]=='' and oll):
                    continue
                samp = f_data[(start*fs):(end*fs),:]
                samps.append({'samp' : samp, 'fs' : fs})
                good_rows.append(i)
            # Feature extraction in parallel with a per-clip timeout.
            r = p.imap(process_subtitle, samps,chunksize=1)
            sf = subtitle_frame.iloc[good_rows]
            results = []
            for i in range(len(samps)):
                try:
                    results.append(r.next(timeout=timeout))
                except TimeoutError:
                    results.append(None)
            # Keep only the rows whose extraction succeeded.
            good_rows = [i for i in xrange(0,len(results)) if results[i]!=None]
            audio_features = [i for i in results if i!=None]
            good_sf = sf.iloc[good_rows]
            good_sf.index = range(good_sf.shape[0])
            audio_frame = pd.DataFrame(audio_features)
            audio_frame.index = range(audio_frame.shape[0])
            df = pd.concat([good_sf,audio_frame],axis=1)
            df = df.fillna(-1)
            df.index = range(df.shape[0])
            frames.append(df)
            lab_df_shape = df[df['label']!=''].shape[0]
            log.info("Processed {0} lines, {1} of which were labelled".format(df.shape[0],lab_df_shape))
        p.close()
        p.join()
        log.info("Done processing episodes.")
        data = pd.concat(frames,axis=0)
        data.index = range(data.shape[0])
        data.index = range(data.shape[0])
        # Features from FFT-based code can be complex; keep the real part.
        for c in list(data.columns):
            data[c] = data[c].real
        # Collapse individual speakers into their coarse CHARACTERS bucket.
        # NOTE(review): chained assignment -- relies on old pandas semantics.
        for k in CHARACTERS:
            for i in CHARACTERS[k]:
                data['label'][data['label']==i] = k
        self.label_codes = {k:i for (i,k) in enumerate(set(data['label']))}
        reverse_label_codes = {self.label_codes[k]:k for k in self.label_codes}
        data['label_code'] = [self.label_codes[k] for k in data['label']]
        self.seq = SequentialValidate()
        #Do cv to get error estimates
        cv_frame = data[data['label']!=""]
        self.seq.train(cv_frame,**self.seq.args)
        self.res = self.seq.results
        self.res = self.res[['line', 'label','label_code','result_code','result_label']]
        exact_percent, adj_percent = compute_error(self.res)
        log.info("Exact match percent: {0}".format(exact_percent))
        log.info("Adjacent match percent: {0}".format(adj_percent))
        #Predict in the frame
        alg = RandomForestTrain()
        target = cv_frame['label_code']
        non_predictors = ["label","line","label_code"]
        train_names = [l for l in list(cv_frame.columns) if l not in non_predictors]
        train_data = cv_frame[train_names]
        predict_data = data[train_names]
        clf = alg.train(train_data,target,**alg.args)
        data['result_code'] = alg.predict(predict_data)
        data['result_label'] = [reverse_label_codes[k] for k in data['result_code']]
        return data
def compute_error(data):
    """Return (exact_match_percent, adjacent_match_percent).

    data: DataFrame with integer 'result_code' and 'label_code' columns and a
    contiguous 0..n-1 index. A prediction is "adjacent" when it matches the
    label at row i-1, i, or i+1.
    """
    exact_match = data[data['result_code']==data['label_code']]
    exact_match_percent = exact_match.shape[0]/data.shape[0]
    adjacent_match = []
    for i in range(0, data.shape[0]):
        # Window covering rows i-1..i+1, clipped to the frame bounds.
        start = i-1
        if start < 0:
            # BUG FIX: was clipped to 1, which excluded row 0 from its own
            # window and from row 1's window.
            start = 0
        end = i+2
        if end > data.shape[0]:
            end = data.shape[0]
        sel_labs = list(data.iloc[start:end]['label_code'])
        adjacent_match.append(data['result_code'][i] in sel_labs)
    adj_percent = sum(adjacent_match)/data.shape[0]
    return exact_match_percent, adj_percent
def calc_slope(x, y):
    """Pseudo-slope of y against x: ratio built from total absolute
    deviations around each mean (not a least-squares slope)."""
    x_dev = np.sum(np.abs(np.subtract(x, np.mean(x))))
    y_dev = np.sum(np.abs(np.subtract(y, np.mean(y))))
    # Same expression as the least-reduced form: (x_dev*y_dev)/(x_dev*x_dev).
    return (x_dev * y_dev) / (x_dev * x_dev)
def get_indicators(vec):
    """Summarize *vec* as (mean, pseudo-slope over its index, std-dev)."""
    positions = np.arange(len(vec))
    return np.mean(vec), calc_slope(positions, vec), np.std(vec)
def calc_u(vec):
    """Spectrally weighted moment: sum(FFT(vec) * vec) / sum(vec)."""
    spectrum = np.fft.fft(vec)
    weighted_total = np.sum(np.multiply(spectrum, vec))
    return weighted_total / np.sum(vec)
def calc_features(vec, freq):
    """Compute a 23-element feature vector for one audio channel.

    vec: 1-D numpy array of samples; freq: sample rate in Hz.
    Returns [mean, spectral flux, max, min, std, bin-min stats, bin-peak
    stats, cepstral stats, zero crossings, spread, skewness, slope stats].
    """
    #bin count
    bc = 10
    bincount = list(range(bc))
    #framesize
    fsize = 512
    #mean
    m = np.mean(vec)
    #spectral flux
    sf = np.mean(vec-np.roll(vec,fsize))
    mx = np.max(vec)
    mi = np.min(vec)
    sdev = np.std(vec)
    # BUG FIX: with ``from __future__ import division`` at module top,
    # ``len(vec)/bc`` is a float, which is not a valid slice index; use
    # floor division.
    binwidth = len(vec)//bc
    bins = []
    for i in range(0, bc):
        bins.append(vec[(i*binwidth):(binwidth*i + binwidth)])
    peaks = [np.max(i) for i in bins]
    mins = [np.min(i) for i in bins]
    amin,smin,stmin = get_indicators(mins)
    apeak, speak, stpeak = get_indicators(peaks)
    #fft = np.fft.fft(vec)
    bin_fft = []
    for i in range(0, bc):
        bin_fft.append(np.fft.fft(vec[(i*binwidth):(binwidth*i + binwidth)]))
    # Per-bin cepstrum: inverse FFT of the log magnitude spectrum.
    cepstrums = [np.fft.ifft(np.log(np.abs(i))) for i in bin_fft]
    inter = [get_indicators(i) for i in cepstrums]
    acep,scep, stcep = get_indicators([i[0] for i in inter])
    aacep,sscep, stsscep = get_indicators([i[1] for i in inter])
    # Zero-crossing count and rate (crossings per Hz of sample rate).
    zero_crossings = np.where(np.diff(np.sign(vec)))[0]
    zcc = len(zero_crossings)
    zccn = zcc/freq
    u = [calc_u(i) for i in bins]
    spread = np.sqrt(u[-1] - u[0]**2)
    skewness = (u[0]**3 - 3*u[0]*u[5] + u[-1])/spread**3
    #Spectral slope
    #ss = calc_slope(np.arange(len(fft)),fft)
    avss = [calc_slope(np.arange(len(i)),i) for i in bin_fft]
    savss = calc_slope(bincount,avss)
    mavss = np.mean(avss)
    return [m,sf,mx,mi,sdev,amin,smin,stmin,apeak,speak,stpeak,acep,scep,stcep,aacep,sscep,stsscep,zcc,zccn,spread,skewness,savss,mavss]
def extract_features(sample, freq):
    """Concatenate per-channel features for a stereo *sample*: left channel's
    features followed by the right channel's."""
    per_channel = [calc_features(sample[:, channel], freq)
                   for channel in (0, 1)]
    return per_channel[0] + per_channel[1]
def process_subtitle(d):
    """Pool worker: extract audio features for one subtitle clip.

    d: dict with 'samp' (2-channel sample array) and 'fs' (sample rate).
    Returns the combined left+right feature list, or None on failure.
    """
    samp = d['samp']
    fs = d['fs']
    # Presumably guards against a placeholder string arriving instead of a
    # sample array -- TODO confirm.  (``basestring`` is Python 2 only.)
    if isinstance(samp,basestring):
        return None
    try:
        features = extract_features(samp,fs)
    except Exception:
        # Best-effort: a failed clip is dropped upstream (None filtered out).
        log.exception("Cannot generate features")
        return None
    return features
class RandomForestTrain(Train):
    """
    A class to train a random forest
    """
    # Names of the feature columns seen at fit time.
    colnames = List()
    # The fitted classifier instance.
    clf = Complex()

    category = RegistryCategories.algorithms
    namespace = get_namespace(__module__)
    algorithm = RandomForestClassifier
    # NOTE(review): 'compute_importances' was removed from scikit-learn long
    # ago (importances are always computed) -- confirm against the pinned
    # sklearn version.
    args = {'n_estimators' : 300, 'min_samples_leaf' : 4, 'compute_importances' : True}

    help_text = "Train and predict with Random Forest."
class CrossValidate(Task):
    """K-fold cross-validation over a labelled DataFrame."""

    # Input frame.
    data = Complex()
    # Frame of per-row predictions joined back onto the input.
    results = Complex()
    # Mean absolute error (only set by calc_error()).
    error = Float()
    # Per-fold feature importance arrays.
    importances = Complex()
    # Mean importance per feature across folds.
    importance = Complex()
    # Feature column names used for training.
    column_names = List()

    data_format = SimpsonsFormats.dataframe

    category = RegistryCategories.preprocessors
    namespace = get_namespace(__module__)
    args = {
        'nfolds' : 3,
        'algo' : RandomForestTrain,
        'target_name' : 'label_code',
        'non_predictors' : ["label","line","label_code"]
    }

    help_text = "Cross validate simpsons data."

    def cross_validate(self, data, **kwargs):
        """Shuffle row indices, split into nfolds, train on the out-folds and
        predict the held-out fold.  Returns (per-fold predictions, folds)."""
        nfolds = kwargs.get('nfolds', 3)
        algo = kwargs.get('algo')
        seed = kwargs.get('seed', 1)
        self.target_name = kwargs.get('target_name')
        non_predictors = kwargs.get('non_predictors')
        self.column_names = [l for l in list(data.columns) if l not in non_predictors]
        data_len = data.shape[0]
        counter = 0
        fold_length = int(math.floor(data_len/nfolds))
        folds = []
        # Deterministic shuffle so runs are reproducible for a given seed.
        data_seq = list(xrange(0,data_len))
        random.seed(seed)
        random.shuffle(data_seq)
        for fold in xrange(0, nfolds):
            start = counter
            end = counter + fold_length
            # Last fold absorbs the remainder rows.
            if fold == (nfolds-1):
                end = data_len
            folds.append(data_seq[start:end])
            counter += fold_length
        results = []
        data.index = range(data.shape[0])
        self.importances = []
        for (i,fold) in enumerate(folds):
            predict_data = data.iloc[fold,:]
            out_indices = list(chain.from_iterable(folds[:i] + folds[(i + 1):]))
            train_data = data.iloc[out_indices,:]
            alg = algo()
            target = train_data[self.target_name]
            train_data = train_data[[l for l in list(train_data.columns) if l not in non_predictors]]
            predict_data = predict_data[[l for l in list(predict_data.columns) if l not in non_predictors]]
            clf = alg.train(train_data,target,**algo.args)
            results.append(alg.predict(predict_data))
            self.importances.append(clf.feature_importances_)
        return results, folds

    def train(self, data, target, **kwargs):
        """
        Used in the training phase.  Override.
        """
        self.target_name = kwargs.get('target_name')
        results, folds = self.cross_validate(data, **kwargs)
        self.gather_results(results, folds, data)

    def gather_results(self, results, folds, data):
        """Re-orders fold predictions back into row order and joins them onto
        *data*; also aggregates feature importances."""
        full_results = list(chain.from_iterable(results))
        full_indices = list(chain.from_iterable(folds))
        partial_result_df = make_df([full_results, full_indices], ["result", "index"])
        # NOTE(review): DataFrame.sort() is the pre-0.17 pandas API.
        partial_result_df = partial_result_df.sort(["index"])
        partial_result_df.index = range(partial_result_df.shape[0])
        result_df = pd.concat([partial_result_df, data], axis=1)
        self.results = result_df
        self.calc_importance(self.importances, self.column_names)

    def calc_error(self, result_df):
        """Mean absolute error between predictions and the target column."""
        self.error = np.mean(np.abs(result_df['result'] - result_df[self.target_name]))

    def calc_importance(self, importances, col_names):
        """Average per-fold feature importances and sort them in place."""
        importance_frame = pd.DataFrame(importances)
        importance_frame.columns = col_names
        self.importance = importance_frame.mean(axis=0)
        # NOTE(review): Series.sort() is the pre-0.17 pandas in-place API.
        self.importance.sort(0)

    def predict(self, data, **kwargs):
        """
        Used in the predict phase, after training.  Override
        """
        pass
def make_df(datalist, labels, name_prefix=""):
    """Build a DataFrame whose columns are the sequences in *datalist*.

    Column names come from *labels* (optionally prefixed with
    ``name_prefix + "_"``), lower-cased with spaces replaced by underscores.
    """
    frame = pd.DataFrame(datalist).T
    if name_prefix != "":
        labels = [name_prefix + "_" + l for l in labels]
    frame.columns = [l.replace(" ", "_").lower() for l in labels]
    frame.index = range(frame.shape[0])
    return frame
# Coarse character buckets: each individual speaker label listed here is
# collapsed into its bucket key before training (see LoadAudioFiles.predict).
CHARACTERS = {
    'Tertiary': [
        'Willy',
        'Hibbert',
        'Ralph',
        'Barney',
        'Carl',
        'Otto',
        'Dr.Nick',
        'Ms.K',
        'Teacher',
        'Kids',
        'Santa',
        'Lenny',
        'Comic Book Guy',
        'Quimby',
        'Ms.Hoover',
        'Patty',
        'Duffman',
        'Troy',
        'Kid'],
}
"""
from tasks.train import Vectorizer
v = Vectorizer()
log.info(data['label'])
v.fit(list(data['line']),list(data['label_code']))
feats = v.batch_get_features(list(data['line']))
feats_frame = pd.DataFrame(feats)
feats_frame.columns = list(xrange(100,feats_frame.shape[1]+100))
feats_frame.index = range(feats_frame.range[0])
data = pd.concat([data,feats_frame],axis=1)
data = data.fillna(-1)
"""
class SequentialValidate(CrossValidate):
    """Leave-one-season-out validation: train on all other seasons, predict
    the held-out season."""

    args = {
        'min_years' : 10,
        'algo' : RandomForestTrain,
        'split_var' : 'season',
        'target_name' : 'label_code',
        'non_predictors' : ["label","line","label_code", 'result_label','result_code']
    }

    def sequential_validate(self, data, **kwargs):
        """For each unique value of *split_var*, fit on the complement and
        predict the held-out slice; accumulates results and importances."""
        algo = kwargs.get('algo')
        seed = kwargs.get('seed', 1)
        split_var = kwargs.get('split_var')
        non_predictors = kwargs.get('non_predictors')
        self.target_name = kwargs.get('target_name')
        random.seed(seed)
        label_codes = {k:i for (i,k) in enumerate(set(data['label']))}
        results = []
        self.importances = []
        unique_seasons = list(set(data[split_var]))
        for s in unique_seasons:
            train_data = data[data[split_var] != s]
            predict_full = data[data[split_var] == s]
            alg = algo()
            target = train_data[self.target_name]
            train_names = [l for l in list(train_data.columns) if l not in non_predictors]
            train_data = train_data[train_names]
            predict_data = predict_full[train_names]
            clf = alg.train(train_data,target, **algo.args)
            # NOTE(review): assigning to a filtered slice of ``data`` --
            # relies on old pandas chained-assignment semantics.
            predict_full['result_code'] = alg.predict(predict_data)
            # NOTE(review): np.amax with no axis yields ONE scalar for the
            # whole season, not a per-row confidence -- looks like it should
            # be np.amax(..., axis=1); confirm intent.
            predict_full['confidence'] = np.amax(clf.predict_proba(predict_data))
            self.importances.append(clf.feature_importances_)
            results.append(predict_full)
        reverse_label_codes = {label_codes[k]:k for k in label_codes}
        # -1 is the fillna sentinel used upstream; map it to the empty label.
        reverse_label_codes.update({-1 : ''})
        self.results = pd.concat(results,axis=0,ignore_index=True)
        self.results['result_label'] = [reverse_label_codes[k] for k in self.results['result_code']]
        self.calc_importance(self.importances, train_names)

    def train(self, data, **kwargs):
        """
        Used in the training phase.  Override.
        """
        # NOTE(review): signature drops the ``target`` parameter that the
        # CrossValidate base class accepts -- callers pass only (data, args).
        self.sequential_validate(data, **kwargs)
| |
"""
Tests for locust api module
These tests requires locust installed
"""
#pylint: disable=W0403,C0103,too-many-public-methods
import unittest
from subprocess import call
from netifaces import interfaces
from locust.api import Agent
from time import time, sleep
# Canonical status strings returned by the agent API.
STATUSES = {'success': 'success',
            'error': 'error'}
# Human-readable messages the tests expect for each outcome.
MESSAGES = {'success': 'Network adapter is disabled',
            'error': 'Network adapter is not disabled'}
# Well-known host pinged to probe outbound connectivity.
HOSTNAME = 'google.com'
def is_network_enabled():
    """Return True when a single ping to HOSTNAME succeeds."""
    command = 'ping -c {packets} {hostname} '.format(
        packets=1, hostname=HOSTNAME)
    succeeded = call(command, shell=True) == 0
    # Throttle back-to-back probes.
    sleep(1)
    return succeeded
def wait_for_network_disabled(seconds=30):
    """Poll until the network goes down; True on success, False after
    *seconds* have elapsed."""
    deadline = time() + seconds
    while time() < deadline:
        if not is_network_enabled():
            return True
    return False
def wait_for_network_enabled(seconds=30):
    """Poll until the network comes up; False if still down after `seconds`."""
    deadline = time() + seconds
    while time() < deadline:
        if is_network_enabled():
            return True
    return False
def check_network_interface_is_up(interface_name):
    """True iff `ip a` reports the interface in state UP."""
    command = "ip a|grep ': {interface}:.*state UP'".format(
        interface=interface_name)
    # grep exits 0 only when a matching line is found.
    return call(command, shell=True) == 0
def get_active_adapter():
    """Return the first network interface reported as UP, or None."""
    return next((adapter for adapter in interfaces()
                 if check_network_interface_is_up(adapter)), None)
class DisableNetworkAdaptersApi(unittest.TestCase):
    """Unit tests for Agent.disable_network_adapters.

    Each test disables one or more adapters, verifies the status/message
    reported by the agent, and checks that connectivity actually drops
    and (where applicable) comes back after the timeout. The previously
    quintuplicated verification code is factored into private helpers.
    """

    time_to_be_disabled_common = 5
    time_delta_for_reconnect_common = 30
    time_to_wait_in_test_common = time_to_be_disabled_common +\
        time_delta_for_reconnect_common

    wrong_status = 'Expected status: {expected}. Current status: {actual}'
    wrong_message = 'Expected message: {expected}. Current message: {actual}'
    was_not_enabled = 'Network was not enabled after {seconds} seconds'
    was_enabled = 'Network was enabled after {seconds} seconds.' \
                  'Should be disabled'
    was_disabled = 'Network was disabled. Should stay enabled'

    def _require_network_up(self):
        """Fail fast if there is no connectivity before the test starts."""
        self.assertTrue(wait_for_network_enabled(
            self.time_delta_for_reconnect_common),
            'Initially Network is disabled.')

    def _disable_and_verify(self, adapters, time_to_be_disabled, outcome):
        """Call the agent and verify the first result entry.

        outcome: key into STATUSES/MESSAGES ('success' or 'error').
        """
        result = Agent.disable_network_adapters(adapters, time_to_be_disabled)
        self.assertEqual(type(result), dict, 'Returned result should be dict')
        status_from_result = result['list'][0]['status']
        message_from_result = result['list'][0]['message']
        self.assertEqual(status_from_result, STATUSES[outcome],
                         self.wrong_status.format(
                             expected=STATUSES[outcome],
                             actual=status_from_result))
        self.assertEqual(message_from_result, MESSAGES[outcome],
                         self.wrong_message.format(
                             expected=MESSAGES[outcome],
                             actual=message_from_result))

    def test_disable_one_network_adapter(self):
        """Disables an active adapter
        and then enables after specified timeout."""
        time_to_wait_in_test = self.time_to_wait_in_test_common
        adapter = get_active_adapter()
        self._require_network_up()
        self._disable_and_verify(adapter, self.time_to_be_disabled_common,
                                 'success')
        self.assertTrue(wait_for_network_disabled(),
                        'Network was not disabled')
        self.assertTrue(wait_for_network_enabled(time_to_wait_in_test),
                        self.was_not_enabled.format(
                            seconds=time_to_wait_in_test))

    def test_disable_all_network_adapters_specify_list_of_adapter(self):
        """Disables all adapters (takes list of adapters as argument)
        and then enables after specified timeout."""
        time_to_wait_in_test = self.time_to_wait_in_test_common
        adapters = interfaces()
        self._require_network_up()
        self._disable_and_verify(adapters, self.time_to_be_disabled_common,
                                 'success')
        self.assertTrue(wait_for_network_disabled(),
                        'Network was not disabled')
        self.assertTrue(wait_for_network_enabled(time_to_wait_in_test),
                        self.was_not_enabled.format(
                            seconds=time_to_wait_in_test))

    def test_disable_all_network_adapters_empty_list_of_adapters(self):
        """Disables all adapters ('adapters' parameter is not set)
        and then enables after specified timeout."""
        time_to_wait_in_test = self.time_to_wait_in_test_common
        self._require_network_up()
        self._disable_and_verify(None, self.time_to_be_disabled_common,
                                 'success')
        self.assertTrue(wait_for_network_disabled(),
                        'Network was not disabled')
        self.assertTrue(wait_for_network_enabled(time_to_wait_in_test),
                        self.was_not_enabled.format(
                            seconds=time_to_wait_in_test))

    def test_disable_all_network_adapters_no_time_no_adapters(self):
        """Disables all adapters ('adapters' parameter is not set)
        for unlimited time ('time' parameter = 0)."""
        time_to_wait_in_test = self.time_to_wait_in_test_common
        self._require_network_up()
        # time=0 means "disabled until explicitly re-enabled".
        self._disable_and_verify(None, 0, 'success')
        self.assertTrue(wait_for_network_disabled(),
                        'Network was not disabled')
        self.assertFalse(wait_for_network_enabled(time_to_wait_in_test),
                         self.was_enabled.format(seconds=time_to_wait_in_test))

    def test_disable_not_existing_network_adapter(self):
        """ Trying to use name that does not exist.
        Verifying that correct error message is shown."""
        time_to_wait_in_test = self.time_to_wait_in_test_common
        self._require_network_up()
        self._disable_and_verify('this_adapter_does_not_exist',
                                 self.time_to_be_disabled_common, 'error')
        self.assertFalse(wait_for_network_disabled(time_to_wait_in_test),
                         'Network was disabled. Should stay enabled')

    def setUp(self):
        # Make sure each test starts with working connectivity.
        if not wait_for_network_enabled(self.time_to_wait_in_test_common):
            Agent.enable_network_adapters()
            wait_for_network_enabled(self.time_delta_for_reconnect_common)

    @classmethod
    def tearDownClass(cls):
        # Leave the machine with networking enabled, whatever happened.
        if not wait_for_network_enabled(cls.time_to_be_disabled_common):
            Agent.enable_network_adapters()
            wait_for_network_enabled(cls.time_delta_for_reconnect_common)
def main():
    """Entry point: run the test suite with verbose output."""
    unittest.main(verbosity=3)
if __name__ == '__main__':
main()
| |
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from collections import Counter, OrderedDict
import copy
import logging
from . import SPACES, types
from .types.symbolic import (
k_used_symbols,
k_num_internal_syms,
is_symbolic,
)
from .var import Var, InternalVar
from .visitors.dot_visitor import DotVisitor
# BLOCK_STACK[-1] is the current block
BLOCK_STACK = []
# When True, Block.validate() performs full consistency checks (expensive).
DEBUG = False
def curr_block():
    """Return the innermost Block currently being built.

    Raises:
        ValueError: if called outside any Function/Block context.
    """
    if not BLOCK_STACK:
        # BUGFIX: message previously read "inside an Function" (built from
        # an odd two-part concatenation); fixed grammar in one literal.
        raise ValueError("Must call Builder inside a Function or Block")
    return BLOCK_STACK[-1]
class InvalidBlockStateError(Exception):
    """Raised when Block.validate() detects inconsistent internal state."""
    pass
class Block(object):
__slots__ = [
"name",
"_block_inputs",
"_outputs",
"operations",
"_internal_vars",
"outer_op",
]
counter = 0
@classmethod
def _get_new_name(cls):
curr_val = cls.counter
cls.counter += 1
return "block" + str(curr_val)
    def __init__(self, block_inputs=None, outer_op=None, name=None):
        """Create a (possibly nested) block of operations.

        Parameters
        ----------
        block_inputs: tuple[Var] or None
            Non-empty only when the block represents a loop body/condition.
            By convention the names end in '.x' and the Vars are not
            produced by any op (block_inputs[i]._op is None).
        outer_op: Operation or None
            The enclosing op. None iff this Block is a Function.
        name: str or None
            Auto-generated via Block._get_new_name() when None.
        """
        self.name = name
        if self.name is None:
            self.name = Block._get_new_name()

        # list[Operation]. Topologically sorted.
        self.operations = []

        # Must be set before self.validate()
        self.outer_op = outer_op

        self._block_inputs = block_inputs
        if self._block_inputs is None:
            self._block_inputs = tuple()

        # list[Var]. This is converted to str when generating MIL proto.
        self._outputs = []

        # If we create const, whose inputs (mode, val) cannot be const
        # (infinite recursion). They must be considered as always visible.
        self._internal_vars = set()

        if self.outer_op is None and not isinstance(self, Function):
            msg = "Block {} is not Function and thus outer_op cannot be None"
            raise ValueError(msg.format(self.name))

        self.validate()
def validate(self):
"""
Basic validation to protect against some invalid state.
"""
if not DEBUG:
return
for op in self.operations:
for b in op.blocks:
b.validate()
if op.outputs is None:
raise InvalidBlockStateError()
# Check the input output relationships
# from outputs -> inputs
for ov in op.outputs:
child_op_count = Counter(ov.child_ops)
for next_op, c in child_op_count.items():
c_actual = next_op.get_flattened_inputs().count(ov)
if c_actual != c:
msg = (
"Var {} should be consumed by op {} {}"
+ " times, but op {} uses it {} times.\n{}"
)
raise InvalidBlockStateError(
msg.format(
ov.name,
next_op.name,
c,
next_op.name,
c_actual,
next_op,
)
)
# from inputs -> outputs
input_var_count = Counter(op.get_flattened_inputs())
for iv, c in input_var_count.items():
c_actual = iv.child_ops.count(op)
if c_actual != c:
msg = (
"Var {} should be consumed by op {} {}"
+ " times, but op {} uses it {} times.\n{}"
)
raise InvalidBlockStateError(
msg.format(iv.name, op.name, c_actual, op.name, c, op)
)
# 1 to 1 mapping between Block outputs and Var.consuming_blocks
for op in self.operations:
for ov in op.outputs:
for b in ov.consuming_blocks:
if ov not in b.outputs:
msg = "Var {} should be output of block {}: {}"
raise ValueError(msg.format(ov.name, b.name, b))
for v in self.outputs:
if self not in v.consuming_blocks:
msg = "Var {} should be output of block {}: {}"
raise ValueError(msg.format(ov.name, b.name, b))
def remove_inputs(self, curr_input_vars):
"""
curr_input_vars: list[Var], whose elements must be in
self._block_inputs.
"""
self.validate()
remove_idx = [self._block_inputs.index(v) for v in curr_input_vars]
self._block_inputs = [
v for i, v in enumerate(self._block_inputs) if i not in remove_idx
]
def find_ops(self, prefix=None, op_type=None):
"""
Return list of ops with name matching `prefix` if specified and
op_type, if specified. At least one of {prefix, op_type} must be specified.
prefix: str
Return list[Operation]. Empty list if no op satisfies.
"""
if prefix is None and op_type is None:
raise ValueError("Must specify one of {prefix, op_type}")
found_ops = []
for op in self.operations:
prefix_match = prefix is None or op.name[: len(prefix)] == prefix
op_type_match = op_type is None or op.op_type == op_type
if prefix_match and op_type_match:
found_ops.append(op)
for b in op.blocks:
found_ops.extend(b.find_ops(prefix=prefix, op_type=op_type))
return found_ops
def add_internal_var(self, internal_var):
if not isinstance(internal_var, InternalVar):
raise ValueError("Only InternalVar can be manually added to Block.")
self._internal_vars.add(internal_var)
@property
def inputs(self):
return self._block_inputs
@property
def outputs(self):
return self._outputs
    def set_outputs(self, outputs):
        """
        Make `outputs` (list[Var]) the outputs of this block.

        Every var must already be visible inside the block. Updates the
        Var.consuming_blocks bookkeeping for both old and new outputs.
        """
        if not isinstance(outputs, list):
            raise ValueError("Outputs must be list of Vars")

        self.validate()
        # A new output must be visible either from the enclosing scope(s)
        # or be produced within this block itself.
        visible_vars = self._visible_vars_from_enclosing_block()
        _, visible_vars_in_block = self._visible_vars_in_block()
        visible_vars.update(visible_vars_in_block)
        for ov in outputs:
            if ov not in visible_vars:
                msg = (
                    "Var {} is not visible in block {} and thus cannot "
                    + "be a block output.\n{}"
                )
                raise ValueError(msg.format(ov.name, self.name, self))

        # The previous outputs are no longer consumed by this block.
        for ov in self._outputs:
            ov.consuming_blocks.remove(self)
        # Need to copy, or block's output would be completely tied to a var's
        # output and we cannot replace a block output with another var's
        # output.
        self._outputs = copy.copy(outputs)
        for ov in outputs:
            ov.consuming_blocks.append(self)
    def __enter__(self):
        # Make this block the current build target (see curr_block()).
        global BLOCK_STACK
        BLOCK_STACK.append(self)
        return self
    def __exit__(self, type, value, traceback):
        # Pop this block off the build stack; exceptions propagate
        # (no `return True`).
        global BLOCK_STACK
        BLOCK_STACK = BLOCK_STACK[:-1]
    def _visible_vars_in_block(self, target_op=None, inclusive=True):
        """
        Returns
        -------
        - index (int) of target_op in self.operations if target_op is not
          None; -1 (undefined) otherwise.
        - visible_vars: set[Var] — this block's internal vars, its block
          (or function) inputs, and the outputs of every op up to
          target_op; target_op's own outputs are included iff
          inclusive==True. When target_op is None, outputs of all ops in
          the block are included.

        Raises
        ------
        - ValueError if target_op is not None and not found in
          self.operations.
        """
        visible_vars = set(self._internal_vars)
        if isinstance(self, Function):
            # Function inputs
            visible_vars.update(tuple(self.inputs.values()))
        else:
            # Block inputs
            visible_vars.update(self.inputs)
        idx = -1
        # find the location of target_op
        for i, op in enumerate(self.operations):
            if op == target_op:
                if inclusive and op.outputs is not None:
                    visible_vars.update(op.outputs)
                return i, visible_vars
            # When op is being constructed (e.g.,type_inference), op.outputs
            # is None
            if op.outputs is not None:
                visible_vars.update(op.outputs)
        if target_op is not None:
            msg = "Op {} not found in {}: {}"
            raise ValueError(msg.format(target_op.name, self.name, self))
        return idx, visible_vars
    def _visible_vars_from_enclosing_block(self):
        """
        Return set[Var] visible at the *beginning* of this block from its
        lexical enclosing scopes: for a Function, just the function inputs;
        otherwise everything visible in the enclosing block strictly before
        self.outer_op (computed recursively up the scope chain).
        """
        visible_vars = set()
        # function inputs are considered external to the block.
        if isinstance(self, Function):
            # A block in a function sees only function_inputs from the
            # enclosing scope.
            visible_vars.update(self.function_inputs)
            return visible_vars
        if self.outer_op is not None:
            enclosing_block = self.outer_op.enclosing_block
            vars_at_start = enclosing_block._visible_vars_from_enclosing_block()
            visible_vars.update(vars_at_start)
            # inclusive=False: vars produced by outer_op itself are not
            # visible inside this (nested) block.
            _, visible_vars_in_block = enclosing_block._visible_vars_in_block(
                self.outer_op, inclusive=False
            )
            visible_vars.update(visible_vars_in_block)
        return visible_vars
    def _insert_op_before(self, new_op, before_op=None):
        """
        A private API used by builder. Please use
        `builder.YOUR_OP(..., before_op)`.

        Insert `new_op` immediately before `before_op`, or append it at the
        end of self.operations when before_op is None. All inputs of new_op
        must be visible at or before before_op, i.e. ops must stay
        topologically sorted. new_op.name must be unique in the block.

        Raises ValueError if any input of new_op is not in scope.
        """
        self.validate()
        visible_vars = self._visible_vars_from_enclosing_block()
        if before_op is not None:
            idx, visible_vars_in_block = self._visible_vars_in_block(
                before_op, inclusive=True
            )
            visible_vars.update(visible_vars_in_block)
        else:
            _, visible_vars_in_block = self._visible_vars_in_block()
            visible_vars.update(visible_vars_in_block)

        # check inputs are visible
        for k, v in new_op.inputs.items():
            if not isinstance(v, (Var, tuple)):
                # Non-Var inputs (e.g. python literals) need no scope check.
                continue
            if isinstance(v, Var):
                vs = [v]
            else:
                vs = v
            for s in vs:
                if s not in visible_vars:
                    before_op_name = before_op.name if before_op is not None else "None"
                    msg = "Op '{}' input {}={} is not in scope of {} before {}"
                    raise ValueError(
                        msg.format(new_op.name, k, s.name, self.name, before_op_name)
                    )

        # add new_op
        if before_op is None:
            self.operations.append(new_op)
        else:
            self.operations.insert(idx, new_op)
    def _replace_var(
        self,
        old_var,
        new_var,
        start=0,
        end_id=-1,
        no_check_var_types=False,
    ):
        """Helper function for replace_uses_of_var_after_op.

        Rewire inputs of self.operations[start:end_id+1] (to the end of the
        block when end_id == -1) from old_var to new_var, recursing into
        nested blocks. Also updates this block's inputs/outputs bookkeeping.
        Returns the number of ops whose inputs were changed.
        """
        num_ops_affected = 0
        if end_id == -1:
            op_list = self.operations[start:]
        else:
            op_list = self.operations[start : end_id + 1]

        for op in op_list:
            new_inputs = {}
            affected = False
            for k, v in op.inputs.items():
                if isinstance(v, (list, tuple)) and old_var in v:
                    new_inputs[k] = tuple(new_var if vv == old_var else vv for vv in v)
                    affected = True
                elif v == old_var:
                    new_inputs[k] = new_var
                    affected = True
                else:
                    new_inputs[k] = v
            if affected:
                num_ops_affected += 1
                op.set_inputs(no_check_var_types=no_check_var_types,
                              **new_inputs)

            # Replace recursively.
            # NOTE(review): no_check_var_types is not forwarded to the
            # recursive call, so nested replacements always re-check types
            # — confirm this is intended.
            for b in op.blocks:
                num_ops_affected += b._replace_var(old_var, new_var)

        # If the replacement range does not contain old_var's producer,
        # this block's own inputs/outputs are untouched.
        if end_id != -1 and old_var.op not in op_list:
            return num_ops_affected

        if old_var in self._block_inputs:
            idx = self._block_inputs.index(old_var)
            self._block_inputs = list(self._block_inputs)
            self._block_inputs[idx] = new_var
            self._block_inputs = tuple(self._block_inputs)

        # If old_var is block's output, replace as well.
        if old_var in self._outputs:
            idx = self._outputs.index(old_var)
            self._outputs[idx] = new_var
            new_var.consuming_blocks.append(self)
            # This block no longer uses `old_var` as its outputs
            old_var.consuming_blocks.remove(self)
            # Ensure output name is consistent
            if isinstance(self, Function):
                new_var.name = old_var.name
        return num_ops_affected
    def replace_uses_of_var_after_op(
        self,
        anchor_op,
        old_var,
        new_var,
        no_check_var_visibility=False,
        end_op=None,
        no_check_var_types=False,
    ):
        """
        Replace all uses of `old_var` with `new_var` after `anchor_op` and
        before `end_op` (inclusive). The op producing `old_var` keeps
        producing it; only the consumers are rewired.

        anchor_op: Operation or None. None means replace throughout the
            whole block (including block inputs and internal vars).
        end_op: Operation or None. None means up to the end of the block.
        no_check_var_visibility: True disables the check that new_var is
            visible at or before anchor_op.
        no_check_var_types: an error is raised if new_var's type differs
            from old_var's unless this is True; type inference re-runs on
            affected child ops otherwise.

        Both vars must already exist within the block (the op generating
        new_var must have been inserted before this call), and affected
        ops must keep producing the same inferred types.
        """
        if not no_check_var_visibility:
            self.validate()
        # Get visible vars from enclosing block
        visible_vars = self._visible_vars_from_enclosing_block()

        if anchor_op is not None:
            # Get visible vars from the current block
            idx, block_vars = self._visible_vars_in_block(anchor_op, inclusive=True)
            visible_vars.update(block_vars)

            # start from the next op, excluding `anchor_op`
            start = idx + 1
        else:
            _, block_vars = self._visible_vars_in_block()
            visible_vars.update(block_vars)
            visible_vars.update(self._block_inputs)
            visible_vars.update(self._internal_vars)

            # Perform replacement from beginning
            start = 0

        if not no_check_var_visibility and new_var not in visible_vars:
            msg = (
                "new_var '{}' is not visible in block '{}' at or before "
                + "anchor_op '{}'"
            )
            anchor_op_name = "None" if anchor_op is None else anchor_op.name
            raise ValueError(msg.format(new_var.name, self.name, anchor_op_name))

        if end_op is not None:
            # Index of end_op bounds the replacement range (inclusive).
            end_id, _ = self._visible_vars_in_block(end_op, inclusive=True)
        else:
            end_id = -1

        if end_id != -1 and end_id < start:
            msg = "end_op '{}' comes before the anchor_op '{}'"
            raise ValueError(msg.format(end_op.name, anchor_op.name))

        num_ops_affected = self._replace_var(
            old_var,
            new_var,
            start=start,
            end_id=end_id,
            no_check_var_types=no_check_var_types,
        )

        logging.debug("Num ops affected in replacing var: {}".format(num_ops_affected))
    def remove_ops(self, existing_ops):
        """
        Remove `existing_ops` (list[Operation]) that must be pre-existing in
        the block. Error if any other op in the block uses output Vars of
        `existing_ops`, or if any of their outputs is a block output.
        """
        self.validate()
        # idxs[i] = position of existing_ops[i] in self.operations
        # (-1 = not found).
        idxs = [-1] * len(existing_ops)
        existing_ops_set = set(existing_ops)
        for i, op in enumerate(self.operations):
            if op in existing_ops_set:
                idxs[existing_ops.index(op)] = i
        if -1 in idxs:
            not_found = []
            for i, op in zip(idxs, existing_ops):
                if i == -1:
                    not_found.append(op.name)
            raise ValueError(
                "Ops {} not found in block {}".format(not_found, self.name)
            )

        # Remove ops in reverse topological order
        pairs = list(zip(idxs, existing_ops))
        pairs.sort(key=lambda x: x[0], reverse=True)

        for idx, op in pairs:
            for i, v in enumerate(op.outputs):
                # Check that no ops depend on op's outputs
                if len(v.child_ops) > 0:
                    child_op_names = [s.name for s in v.child_ops]
                    msg = (
                        "Cannot delete op '{}' with active output at id {}: '{}' "
                        + "used by ops {}"
                    )
                    raise ValueError(msg.format(op.name, i, v.name, child_op_names))
                # Check that the output Var isn't block's output
                if v in self._outputs:
                    msg = (
                        "cannot delete op {} with output {}: {} "
                        + "that's block {}'s output"
                    )
                    raise ValueError(msg.format(op.name, i, v.name, self.name))

            # Clear and remove any nested blocks first.
            for b in op.blocks:
                b.set_outputs([])
                b.remove_ops(b.operations)

            # remove the op (in reverse topological order)
            self.operations.pop(idx)
            op.enclosing_block = None

            # Detach op from its input vars' child lists.
            for v in op.inputs.values():
                if isinstance(v, (tuple, list)):
                    for vv in v:
                        vv.remove_child_op(op)
                else:
                    v.remove_child_op(op)
    def operations_for_vars(self, end_vs):
        """
        Inputs:

        end_vs: list[Var] (the original docstring said list[Operation], but
        the elements are intersected with op.outputs, i.e. Vars).

        Return:

        list[Operation] which are the subset of self.operations that are
        ancestors of `end_vs`, in topological order. Recurses into nested
        blocks.
        """
        used_vars = set(end_vs)
        used_ops = []
        for op in reversed(self.operations):
            # Skip ops none of whose outputs are (transitively) used.
            if not set(op.outputs).intersection(used_vars):
                continue

            used_ops.append(op)  # append in reverse topological order

            # recursively search for nested blocks
            ops_to_check = []
            for b in op.blocks:
                ops_to_check += b.operations_for_vars(b.outputs)
            ops_to_check.append(op)

            # mark used vars
            for op_to_check in ops_to_check:
                # mark all op's inputs to used
                for _, input_var in op_to_check.inputs.items():
                    if isinstance(input_var, (tuple, list)):
                        used_vars.update(list(input_var))
                    else:
                        used_vars.add(input_var)
        return used_ops[::-1]
def indented_str(self, indent=None):
if indent is None:
indent = ""
s = (
indent
+ self.name
+ "("
+ ", ".join([str(var) for var in self._block_inputs])
)
s += ") {\n"
for op in self.operations:
s += op.indented_str(indent + SPACES * 1)
s += indent + "} -> ("
if self._outputs is not None:
s += ", ".join(["%" + v.name for v in self._outputs])
s += ")\n"
return s
def __repr__(self):
return self.__str__()
def __str__(self):
return self.indented_str()
    def get_dot_string(
        self,
        function_name="main",
        prefix_id=0,
        highlight_debug_op_types=None,
        highlight_debug_op_names=None,
    ):
        """
        Return the dot string that can be used to show the block
        with dot. Const ops are not added to the dot string.

        * Input vars : yellow
        * output vars : goldenrod2
        * op names that user wants to highlight, provided in
          "highlight_debug_op_names": cyan
        * op types that user wants to highlight, provided in
          "highlight_debug_op_types": green

        Examples
        --------
        >>> import graphviz
        >>> graphviz.Source(block.get_dot_string()).view()
        >>> # OR
        >>> graphviz.Source(block.get_dot_string()).view(filename='graph.pdf')
        """
        if highlight_debug_op_types is None:
            highlight_debug_op_types = []
        if highlight_debug_op_names is None:
            highlight_debug_op_names = []

        dotstring = "digraph g {\n" + "\tcompound=true;\n"
        # NOTE(review): assumes dict-like inputs (the Function override);
        # a plain Block's inputs are a tuple with no .keys() — confirm
        # callers only invoke this on Functions.
        input_var_names = list(self.inputs.keys())
        output_var_names = [v.name for v in self.outputs]

        # Names of ops whose *type* is in the highlight list.
        debug_op_types = []
        if len(highlight_debug_op_types) > 0:
            for op in self.operations:
                if op.op_type in highlight_debug_op_types:
                    debug_op_types.append(op.name)

        vis = DotVisitor()
        vis.highlight_nodes(input_var_names, "yellow").highlight_nodes(
            output_var_names, "goldenrod2"
        ).highlight_nodes(highlight_debug_op_names, "cyan").highlight_nodes(
            debug_op_types, "green"
        )

        vis.visit_all(self, nodename_prefix=str(prefix_id))
        res = vis.get_result("subgraph", "cluster_" + function_name.replace("/", "_"))
        # Indent the subgraph body one tab inside the digraph wrapper.
        dotstring += "\n".join("\t" + r for r in res.split("\n")) + "\n"
        dotstring += "}"
        return dotstring
class Function(Block):
    """
    The top-level Block of a program: its inputs come from placeholders
    rather than an enclosing op (outer_op is None).
    """

    def __init__(self, inputs):
        """
        inputs: dict[str, placeholder] — user input name -> placeholder;
        each placeholder's first output Var becomes a function input.
        """
        self.placeholder_inputs = inputs
        # str -> Var
        self._input_dict = OrderedDict()
        for k, v in self.placeholder_inputs.items():
            v.set_name(k)  # set to user input name
            self._input_dict[k] = v.outputs[0]
        self.function_inputs = tuple(self._input_dict.values())

        global k_used_symbols
        global k_num_internal_syms
        # Record symbolic dims of the inputs so symbol bookkeeping knows
        # which symbols are already taken.
        for inp in self.function_inputs:
            if types.is_tensor(inp.dtype):
                shapes = inp.dtype.get_shape()
                for s in shapes:
                    if is_symbolic(s):
                        k_used_symbols.add(s)
        super(Function, self).__init__()

    # Override Block's input
    @property
    def inputs(self):
        # OrderedDict[str, Var]: function inputs keyed by user-given name.
        return self._input_dict

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return self.to_str("function")

    def to_str(self, func_name="function"):
        """Render the function with `func_name` as the header keyword."""
        if len(self._input_dict) == 0:
            s = func_name + "() {"
        else:
            inputs = [(in_name, ph) for in_name, ph in self._input_dict.items()]
            s = func_name + "(" + str(inputs[0][1])
            for in_name, ph in inputs[1:]:
                s += ",\n" + " " * (len(func_name) + 1) + str(ph)
            s += ") {\n"
        s += self.indented_str(SPACES)
        s += "}\n"
        return s
| |
#
# This file contains the UserInteraction and Session classes.
#
# The Session encapsulates settings and command results, allowing commands
# to be chained in an interactive environment.
#
# The UserInteraction classes log the progress and performance of individual
# operations and assist with rendering the results in various formats (text,
# HTML, JSON, etc.).
#
###############################################################################
import datetime
import getpass
import os
import random
import re
import sys
import tempfile
import traceback
import json
import urllib
from collections import defaultdict
from json import JSONEncoder
from jinja2 import TemplateError, TemplateSyntaxError, TemplateNotFound
from jinja2 import TemplatesNotFound, TemplateAssertionError, UndefinedError
import mailpile.commands
import mailpile.util
from mailpile.i18n import gettext as _
from mailpile.i18n import ngettext as _n
from mailpile.search import MailIndex
from mailpile.util import *
class SuppressHtmlOutput(Exception):
    """Signal that HTML output should be suppressed."""
    pass
def default_dict(*args):
    """Merge any number of mappings into a defaultdict(str).

    Later arguments override earlier ones; missing keys read as ''.
    """
    merged = defaultdict(str)
    for mapping in args:
        merged.update(mapping)
    return merged
class NoColors:
    """Color constants for dumb terminals: every code is the empty string."""
    C_SAVE = ''
    C_RESTORE = ''
    NORMAL = ''
    BOLD = ''
    NONE = ''
    BLACK = ''
    RED = ''
    YELLOW = ''
    BLUE = ''
    MAGENTA = ''
    CYAN = ''
    FORMAT = "%s%s"
    RESET = ''
    LINE_BELOW = ''

    def max_width(self):
        # Conservative fixed terminal width.
        return 79

    def color(self, text, color='', weight=''):
        prefix = self.FORMAT % (color, weight)
        return '%s%s%s' % (prefix, text, self.RESET)

    def replace_line(self, text, chars=None):
        # Pad with spaces so remnants of the previous line get overwritten.
        used = chars or len(unicode(text))
        pad = ' ' * max(0, min(self.max_width(), self.max_width() - used))
        return '%s%s\r' % (text, pad)

    def add_line_below(self):
        pass

    def print_below(self):
        pass

    def write(self, data):
        sys.stderr.write(data)

    def check_max_width(self):
        pass
class ANSIColors(NoColors):
    """ANSI color constants and escape-sequence based terminal control."""
    NORMAL = ''
    BOLD = ';1'
    NONE = '0'
    BLACK = "30"
    RED = "31"
    YELLOW = "33"
    BLUE = "34"
    MAGENTA = '35'
    CYAN = '36'
    RESET = "\x1B[0m"
    FORMAT = "\x1B[%s%sm"
    CURSOR_UP = "\x1B[1A"
    CURSOR_DN = "\x1B[1B"
    CURSOR_SAVE = "\x1B[s"
    CURSOR_RESTORE = "\x1B[u"
    CLEAR_LINE = "\x1B[2K"

    def __init__(self):
        # Detect the real terminal width up front.
        self.check_max_width()

    def replace_line(self, text, chars=None):
        """Overwrite the current terminal line with text, restoring the
        cursor afterwards; the erase/save/restore escape codes make space
        padding (and the chars argument) unnecessary."""
        return '%s%s%s\r%s' % (self.CURSOR_SAVE,
                               self.CLEAR_LINE, text,
                               self.CURSOR_RESTORE)

    def max_width(self):
        """Return the width detected by check_max_width()."""
        return self.MAX_WIDTH

    def check_max_width(self):
        """Query the terminal size via the TIOCGWINSZ ioctl; fall back to
        79 columns when that fails (no tty, non-Unix platform, etc.)."""
        try:
            import fcntl, termios, struct
            fcntl_result = fcntl.ioctl(sys.stdin.fileno(),
                                       termios.TIOCGWINSZ,
                                       struct.pack('HHHH', 0, 0, 0, 0))
            h, w, hp, wp = struct.unpack('HHHH', fcntl_result)
            self.MAX_WIDTH = (w - 1)
        except Exception:
            # Was a bare "except:", which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception.
            self.MAX_WIDTH = 79
class Completer(object):
    """Readline autocompleter"""
    DELIMS = ' \t\n`~!@#$%^&*()-=+[{]}\\|;:\'",<>?'

    def __init__(self, session):
        self.session = session

    def _available_opts(self, text):
        """Collect every command synopsis and tag name beginning with the
        typed prefix, sorted alphabetically."""
        candidates = [c.SYNOPSIS[1] for c in mailpile.commands.COMMANDS]
        candidates += [c.SYNOPSIS[2] for c in mailpile.commands.COMMANDS]
        candidates += [tag.name.lower()
                       for tag in self.session.config.tags.values()]
        return sorted([c for c in candidates if c and c.startswith(text)])

    def _autocomplete(self, text, state):
        """Readline completion entry point: return the state-th match for
        text (with a trailing space), or None when exhausted."""
        matches = self._available_opts(text)
        try:
            return matches[state] + ' '
        except IndexError:
            return None

    def get_completer(self):
        """Return a closure suitable for readline.set_completer()."""
        return lambda text, state: self._autocomplete(text, state)
class UserInteraction:
    """Log the progress and performance of individual operations"""
    MAX_BUFFER_LEN = 150  # Max entries kept while log output is blocked
    LOG_URGENT = 0
    LOG_RESULT = 5
    LOG_ERROR = 10
    LOG_NOTIFY = 20
    LOG_WARNING = 30
    LOG_PROGRESS = 40
    LOG_DEBUG = 50
    LOG_ALL = 99
    LOG_PREFIX = ''

    def __init__(self, config, log_parent=None, log_prefix=None):
        self.log_parent = log_parent  # Optional parent UI to delegate logging to
        self.log_buffer = []          # Buffered (level, message) pairs
        self.log_buffering = False
        self.log_level = self.LOG_ALL
        self.log_prefix = log_prefix or self.LOG_PREFIX
        self.interactive = False
        self.time_tracking = [('Main', [])]  # Stack of (task name, marks)
        self.time_elapsed = 0.0
        self.render_mode = 'text'
        self.term = NoColors()
        self.config = config
        self.html_variables = {
            'title': 'Mailpile',
            'name': 'Chelsea Manning',
            'csrf': '',
            'even_odd': 'odd',
            'mailpile_size': 0
        }

    # Logging
    def _fmt_log(self, text, level=LOG_URGENT):
        """Format one log line for the terminal: choose color/weight for
        the level, clip long messages, and append a newline (progress
        lines instead stay on one line so they can be overwritten)."""
        c, w, clip = self.term.NONE, self.term.NORMAL, 2048
        if level == self.LOG_URGENT:
            c, w = self.term.RED, self.term.BOLD
        elif level == self.LOG_ERROR:
            c = self.term.RED
        elif level == self.LOG_WARNING:
            c = self.term.YELLOW
        elif level == self.LOG_NOTIFY:
            c = self.term.CYAN
        elif level == self.LOG_DEBUG:
            c = self.term.MAGENTA
        elif level == self.LOG_PROGRESS:
            # Progress lines are clipped to fit a single terminal line
            c, clip = self.term.BLUE, 78
        formatted = self.term.replace_line(self.term.color(
            unicode(text[:clip]).encode('utf-8'), color=c, weight=w),
            chars=len(text[:clip]))
        if level != self.LOG_PROGRESS:
            formatted += '\n'
        return formatted

    def _display_log(self, text, level=LOG_URGENT):
        """Emit a log line, delegating to the parent UI when present."""
        if not text.startswith(self.log_prefix):
            text = '%slog(%s): %s' % (self.log_prefix, level, text)
        if self.log_parent:
            self.log_parent.log(level, text)
        else:
            self.term.write(self._fmt_log(text, level=level))

    def _debug_log(self, text, level):
        """Like _display_log, but only when 'log' debugging is enabled in
        the configuration."""
        if text and 'log' in self.config.sys.debug:
            if not text.startswith(self.log_prefix):
                text = '%slog(%s): %s' % (self.log_prefix, level, text)
            if self.log_parent:
                return self.log_parent.log(level, text)
            else:
                self.term.write(self._fmt_log(text, level=level))

    def clear_log(self):
        """Discard any buffered log messages."""
        self.log_buffer = []

    def flush_log(self):
        """Display all buffered messages at or below the current level."""
        try:
            while len(self.log_buffer) > 0:
                level, message = self.log_buffer.pop(0)
                if level <= self.log_level:
                    self._display_log(message, level)
        except IndexError:
            # Another thread may drain the buffer concurrently
            pass

    def block(self):
        """Start buffering log output (e.g. while an editor is open)."""
        self._display_log('')
        self.log_buffering = True

    def unblock(self):
        """Stop buffering and flush whatever accumulated."""
        self.log_buffering = False
        self.flush_log()

    def log(self, level, message):
        """Log a message, buffering or displaying depending on state."""
        if self.log_buffering:
            self.log_buffer.append((level, message))
            while len(self.log_buffer) > self.MAX_BUFFER_LEN:
                # Drop the oldest ~10% in one slice assignment
                self.log_buffer[0:(self.MAX_BUFFER_LEN/10)] = []
        elif level <= self.log_level:
            self._display_log(message, level)

    # Convenience wrappers around log(), one per level
    error = lambda self, msg: self.log(self.LOG_ERROR, msg)
    notify = lambda self, msg: self.log(self.LOG_NOTIFY, msg)
    warning = lambda self, msg: self.log(self.LOG_WARNING, msg)
    progress = lambda self, msg: self.log(self.LOG_PROGRESS, msg)
    debug = lambda self, msg: self.log(self.LOG_DEBUG, msg)

    # Progress indication and performance tracking
    # The marks list of the innermost task on the tracking stack
    times = property(lambda self: self.time_tracking[-1][1])

    def mark(self, action=None, percent=None):
        """Note that we are about to perform an action."""
        if not action:
            try:
                action = self.times[-1][1]
            except IndexError:
                action = 'mark'
        self.progress(action)
        self.times.append((time.time(), action))
        # print '(%s/%d) %s' % (self, len(self.time_tracking), action)

    def report_marks(self, quiet=False, details=False):
        """Report elapsed time (and, optionally, per-step timings) for
        the current sequence of marks; returns the elapsed seconds."""
        t = self.times
        # print '(%s/%d) REPORT' % (self, len(self.time_tracking))
        if t and t[0]:
            self.time_elapsed = elapsed = t[-1][0] - t[0][0]
            if not quiet:
                try:
                    self.notify(_('Elapsed: %.3fs (%s)') % (elapsed, t[-1][1]))
                    if details:
                        for i in range(0, len(self.times)-1):
                            e = t[i+1][0] - t[i][0]
                            self.debug(' -> %.3fs (%s)' % (e, t[i][1]))
                except IndexError:
                    self.notify(_('Elapsed: %.3fs') % elapsed)
            return elapsed
        return 0

    def reset_marks(self, mark=True, quiet=False, details=False):
        """This sequence of actions is complete."""
        # print '(%s/%d) RESET' % (self, len(self.time_tracking))
        if self.times and mark:
            self.mark()
        elapsed = self.report_marks(quiet=quiet, details=details)
        self.times[:] = []
        return elapsed

    def push_marks(self, subtask):
        """Start tracking a new sub-task."""
        # print '(%s/%d) PUSH' % (self, len(self.time_tracking))
        self.time_tracking.append((subtask, []))

    def pop_marks(self, name=None, quiet=True):
        """Sub-task ended!"""
        elapsed = self.report_marks(quiet=quiet)
        if len(self.time_tracking) > 1:
            if not name or (self.time_tracking[-1][0] == name):
                self.time_tracking.pop(-1)
        # print '(%s/%d) POP' % (self, len(self.time_tracking))
        return elapsed

    # Higher level command-related methods
    def _display_result(self, result):
        """Print a rendered result to stdout, UTF-8 encoded."""
        sys.stdout.write(unicode(result).encode('utf-8').rstrip())
        sys.stdout.write('\n')

    def start_command(self, cmd, args, kwargs):
        """Begin tracking a command invocation."""
        self.flush_log()
        self.push_marks(cmd)
        self.mark(('%s(%s)'
                   ) % (cmd, ', '.join((args or tuple()) +
                                       ('%s' % kwargs, ))))

    def finish_command(self, cmd):
        """End tracking of the named command."""
        self.pop_marks(name=cmd)

    def display_result(self, result):
        """Render command result objects to the user"""
        self._display_log('', level=self.LOG_RESULT)
        if self.render_mode == 'json':
            return self._display_result(result.as_('json'))
        for suffix in ('css', 'html', 'js', 'rss', 'txt', 'xml'):
            if self.render_mode.endswith(suffix):
                # A 'jXXX' mode is the JSON-wrapped variant of 'XXX'
                jsuffix = 'j' + suffix
                if self.render_mode in (suffix, jsuffix):
                    template = 'as.' + suffix
                else:
                    template = self.render_mode.replace('.' + jsuffix,
                                                        '.' + suffix)
                return self._display_result(
                    result.as_template(suffix, template=template))
        return self._display_result(unicode(result))

    # Creating output files
    DEFAULT_DATA_NAME_FMT = '%(msg_mid)s.%(count)s_%(att_name)s.%(att_ext)s'
    DEFAULT_DATA_ATTRS = {
        'msg_mid': 'file',
        'mimetype': 'application/octet-stream',
        'att_name': 'unnamed',
        'att_ext': 'dat',
        'rand': '0000'
    }
    DEFAULT_DATA_EXTS = {
        # FIXME: Add more!
        'text/plain': 'txt',
        'text/html': 'html',
        'image/gif': 'gif',
        'image/jpeg': 'jpg',
        'image/png': 'png'
    }

    def _make_data_filename(self, name_fmt, attributes):
        """Expand a %-style filename format with attachment attributes."""
        return (name_fmt or self.DEFAULT_DATA_NAME_FMT) % attributes

    def _make_data_attributes(self, attributes={}):
        """Fill in default attachment attributes, a random suffix, and a
        file extension guessed from the MIME type."""
        attrs = self.DEFAULT_DATA_ATTRS.copy()
        attrs.update(attributes)
        attrs['rand'] = '%4.4x' % random.randint(0, 0xffff)
        if attrs['att_ext'] == self.DEFAULT_DATA_ATTRS['att_ext']:
            if attrs['mimetype'] in self.DEFAULT_DATA_EXTS:
                attrs['att_ext'] = self.DEFAULT_DATA_EXTS[attrs['mimetype']]
        return attrs

    def open_for_data(self, name_fmt=None, attributes={}):
        """Open a local file for writing attachment data; returns the
        (filename, writable file object) pair."""
        filename = self._make_data_filename(
            name_fmt, self._make_data_attributes(attributes))
        return filename, open(filename, 'w')

    # Rendering helpers for templating and such
    def render_json(self, data):
        """Render data as JSON"""
        class NoFailEncoder(JSONEncoder):
            def default(self, obj):
                if isinstance(obj, (list, dict, str, unicode,
                                    int, float, bool, type(None))):
                    # NOTE(review): JSONEncoder.default() always raises;
                    # these basic types are serialized before default()
                    # is consulted, so this branch looks unreachable.
                    return JSONEncoder.default(self, obj)
                if isinstance(obj, datetime.datetime):
                    return str(obj)
                # Anything unserializable becomes a placeholder string
                return "COMPLEXBLOB"
        return json.dumps(data, indent=1, cls=NoFailEncoder,
                          sort_keys=True, allow_nan=False)

    def _web_template(self, config, tpl_names, elems=None):
        """Return the first Jinja template from tpl_names that loads, or
        None when none of them can be found."""
        env = config.jinja_env
        env.session = Session(config)
        env.session.ui = HttpUserInteraction(None, config, log_parent=self)
        for fn in tpl_names:
            try:
                # FIXME(Security): Here we need to sanitize the file name
                #                  very strictly in case it somehow came
                #                  from user data.
                return env.get_template(fn)
            except (IOError, OSError, AttributeError), e:
                pass
        return None

    def render_web(self, cfg, tpl_names, data):
        """Render data as HTML"""
        alldata = default_dict(self.html_variables)
        alldata["config"] = cfg
        alldata.update(data)
        try:
            template = self._web_template(cfg, tpl_names)
            if template:
                return template.render(alldata)
            else:
                emsg = _("<h1>Template not found</h1>\n<p>%s</p><p>"
                         "<b>DATA:</b> %s</p>")
                tpl_esc_names = [escape_html(tn) for tn in tpl_names]
                return emsg % (' or '.join(tpl_esc_names),
                               escape_html('%s' % alldata))
        except (UndefinedError, ):
            emsg = _("<h1>Template error</h1>\n"
                     "<pre>%s</pre>\n<p>%s</p><p><b>DATA:</b> %s</p>")
            return emsg % (escape_html(traceback.format_exc()),
                           ' or '.join([escape_html(tn) for tn in tpl_names]),
                           escape_html('%.4096s' % alldata))
        except (TemplateNotFound, TemplatesNotFound), e:
            emsg = _("<h1>Template not found in %s</h1>\n"
                     "<b>%s</b><br/>"
                     "<div><hr><p><b>DATA:</b> %s</p></div>")
            return emsg % tuple([escape_html(unicode(v))
                                 for v in (e.name, e.message,
                                           '%.4096s' % alldata)])
        except (TemplateError, TemplateSyntaxError,
                TemplateAssertionError,), e:
            emsg = _("<h1>Template error in %s</h1>\n"
                     "Parsing template %s: <b>%s</b> on line %s<br/>"
                     "<div><xmp>%s</xmp><hr><p><b>DATA:</b> %s</p></div>")
            return emsg % tuple([escape_html(unicode(v))
                                 for v in (e.name, e.filename, e.message,
                                           e.lineno, e.source,
                                           '%.4096s' % alldata)])

    def edit_messages(self, session, emails):
        """Open the user's $VISUAL editor on the given emails and apply
        any edits; returns True when something changed."""
        if not self.interactive:
            return False
        for e in emails:
            if not e.is_editable():
                from mailpile.mailutils import NotEditableError
                raise NotEditableError(_('Message %s is not editable')
                                       % e.msg_mid())
        sep = '-' * 79 + '\n'
        edit_this = ('\n'+sep).join([e.get_editing_string() for e in emails])
        # Block logging while the external editor owns the terminal
        self.block()
        tf = tempfile.NamedTemporaryFile()
        tf.write(edit_this.encode('utf-8'))
        tf.flush()
        os.system('%s %s' % (os.getenv('VISUAL', default='vi'), tf.name))
        tf.seek(0, 0)
        edited = tf.read().decode('utf-8')
        tf.close()
        self.unblock()
        if edited == edit_this:
            return False
        updates = [t.strip() for t in edited.split(sep)]
        if len(updates) != len(emails):
            raise ValueError(_('Number of edit messages does not match!'))
        for i in range(0, len(updates)):
            emails[i].update_from_string(session, updates[i])
        return True

    def get_password(self, prompt):
        """Prompt for a password on the TTY; empty when non-interactive."""
        if not self.interactive:
            return ''
        try:
            self.block()
            return getpass.getpass(prompt.encode('utf-8')).decode('utf-8')
        finally:
            self.unblock()
class HttpUserInteraction(UserInteraction):
    """UserInteraction which buffers logs and results for an HTTP reply."""
    LOG_PREFIX = 'http/'

    def __init__(self, request, *args, **kwargs):
        UserInteraction.__init__(self, *args, **kwargs)
        self.request = request
        self.logged = []   # Buffered (level, message) log entries
        self.results = []  # Buffered rendered command results

    # Just buffer up rendered data
    def _display_log(self, text, level=UserInteraction.LOG_URGENT):
        self._debug_log(text, level)
        self.logged.append((level, text))

    def _display_result(self, result):
        self.results.append(result)

    # Stream raw data to the client on open_for_data
    def open_for_data(self, name_fmt=None, attributes={}):
        return 'HTTP Client', RawHttpResponder(self.request, attributes)

    def _render_text_responses(self, config):
        """Join buffered results (and, in debug mode, logs) as plain text."""
        if config.sys.debug:
            return '%s\n%s' % (
                '\n'.join([l[1] for l in self.logged]),
                ('\n%s\n' % ('=' * 79)).join(self.results)
            )
        else:
            return ('\n%s\n' % ('=' * 79)).join(self.results)

    def _render_single_response(self, config):
        """Return the single buffered result; structured render modes
        cannot represent more than one."""
        if len(self.results) == 1:
            return self.results[0]
        if len(self.results) > 1:
            raise Exception(_('FIXME: Multiple results, OMG WTF'))
        return ""

    def render_response(self, config):
        """Map the current render mode to a (mimetype, body) pair."""
        if (self.render_mode == 'json' or
                self.render_mode.split('.')[-1] in ('jcss', 'jhtml', 'jjs',
                                                    'jrss', 'jtxt', 'jxml')):
            if len(self.results) == 1:
                return ('application/json', self.results[0])
            else:
                return ('application/json', '[%s]' % ','.join(self.results))
        elif self.render_mode.endswith('html'):
            return ('text/html', self._render_single_response(config))
        elif self.render_mode.endswith('js'):
            return ('text/javascript', self._render_single_response(config))
        elif self.render_mode.endswith('css'):
            return ('text/css', self._render_single_response(config))
        elif self.render_mode.endswith('txt'):
            return ('text/plain', self._render_single_response(config))
        elif self.render_mode.endswith('rss'):
            return ('application/rss+xml',
                    self._render_single_response(config))
        elif self.render_mode.endswith('xml'):
            return ('application/xml', self._render_single_response(config))
        else:
            return ('text/plain', self._render_text_responses(config))

    def edit_messages(self, session, emails):
        """Interactive message editing is not possible over HTTP."""
        return False
class BackgroundInteraction(UserInteraction):
    """UserInteraction for background threads: only debug-level display,
    no interactive editing."""
    LOG_PREFIX = 'bg/'

    def _display_log(self, text, level=UserInteraction.LOG_URGENT):
        # Only surfaces output when 'log' debugging is enabled
        self._debug_log(text, level)

    def edit_messages(self, session, emails):
        """Background tasks cannot edit messages interactively."""
        return False
class SilentInteraction(UserInteraction):
    """UserInteraction that displays nothing and returns results to the
    caller instead of printing them."""
    LOG_PREFIX = 'silent/'

    def _display_log(self, text, level=UserInteraction.LOG_URGENT):
        # Only surfaces output when 'log' debugging is enabled
        self._debug_log(text, level)

    def _display_result(self, result):
        return result

    def edit_messages(self, session, emails):
        """Silent mode cannot edit messages interactively."""
        return False
class CapturingUserInteraction(UserInteraction):
    """UserInteraction that captures the rendered result as a string in
    self.captured instead of printing it to stdout."""
    def __init__(self, config):
        # Consistency fix: call the base class directly, like every other
        # UserInteraction subclass in this file, instead of reaching
        # through the mailpile.ui module attribute.
        UserInteraction.__init__(self, config)
        self.captured = ''

    def _display_result(self, result):
        # Remember the rendered result instead of writing it to stdout
        self.captured = unicode(result)
class RawHttpResponder:
    """File-like object that streams raw attachment data directly to an
    HTTP client, sending the response status and headers up front."""
    def __init__(self, request, attributes={}):
        self.raised = False
        self.request = request
        #
        # FIXME: Security risks here, untrusted content may find its way into
        #        our raw HTTP headers etc.
        #
        mimetype = attributes.get('mimetype', 'application/octet-stream')
        filename = attributes.get('filename', 'attachment.dat'
                                  ).replace('"', '')
        disposition = attributes.get('disposition', 'attachment')
        # 'length' is required: Content-Length must be known up front
        length = attributes['length']
        request.send_http_response(200, 'OK')
        headers = [
            ('Content-Length', length),
        ]
        if disposition and filename:
            # RFC 5987 style encoding for non-ASCII filenames
            encfilename = urllib.quote(filename.encode("utf-8"))
            headers.append(('Content-Disposition',
                            '%s; filename*=UTF-8\'\'%s' % (disposition,
                                                           encfilename)))
        elif disposition:
            headers.append(('Content-Disposition', disposition))
        request.send_standard_headers(header_list=headers,
                                      mimetype=mimetype)

    def write(self, data):
        """Stream a chunk of data to the client."""
        self.request.wfile.write(data)

    def close(self):
        """Finish the download and abort normal HTML rendering (once)."""
        if not self.raised:
            self.raised = True
            raise SuppressHtmlOutput()
class Session(object):
    """Encapsulates per-connection state: configuration, UI, search
    results and context, so commands can be chained interactively."""
    @classmethod
    def Snapshot(cls, session, **copy_kwargs):
        """Create a new Session copying state from an existing one."""
        return cls(session.config).copy(session, **copy_kwargs)

    def __init__(self, config):
        self.config = config
        self.main = False
        self.ui = UserInteraction(config)
        # NOTE(review): UiRLock presumably comes from mailpile.util's
        # star import -- verify.
        self.wait_lock = threading.Condition(UiRLock())
        self.task_results = []  # Completed (name, result) background tasks
        self.order = None
        self.results = []
        self.searched = []
        self.displayed = None
        self.context = None

    def set_interactive(self, val):
        """Toggle interactive mode on the underlying UI."""
        self.ui.interactive = val

    # Expose self.ui.interactive as a session-level property
    interactive = property(lambda s: s.ui.interactive,
                           lambda s, v: s.set_interactive(v))

    def copy(self, session, ui=False, search=True):
        """Copy UI and/or search state from another session; returns self."""
        if ui:
            self.main = session.main
            self.ui = session.ui
        if search:
            self.order = session.order
            self.results = session.results[:]
            self.searched = session.searched[:]
            self.displayed = session.displayed
            self.context = session.context
        return self

    def get_context(self, update=False):
        """Return (creating it if needed) a context ID for the current
        search state."""
        if update or not self.context:
            if self.searched:
                sid = self.config.search_history.add(self.searched,
                                                     self.results,
                                                     self.order)
                self.context = 'search:%s' % sid
        return self.context

    def load_context(self, context):
        """Restore search state from a context ID; False on failure."""
        if self.context and self.context == context:
            return context
        try:
            if context.startswith('search:'):
                s, r, o = self.config.search_history.get(self, context[7:])
                self.searched, self.results, self.order = s, r, o
                self.displayed = None
                self.context = context
                return context
            else:
                return False
        except (KeyError, ValueError):
            return False

    def report_task_completed(self, name, result):
        """Record a background task result and wake any waiters."""
        with self.wait_lock:
            self.task_results.append((name, result))
            self.wait_lock.notify_all()

    def report_task_failed(self, name):
        """Record a failed background task (its result is None)."""
        self.report_task_completed(name, None)

    def wait_for_task(self, wait_for, quiet=False):
        """Block until the named background task completes and return its
        result; loops until app shutdown (mailpile.util.QUITTING)."""
        while not mailpile.util.QUITTING:
            with self.wait_lock:
                for i in range(0, len(self.task_results)):
                    if self.task_results[i][0] == wait_for:
                        tn, rv = self.task_results.pop(i)
                        self.ui.reset_marks(quiet=quiet)
                        return rv
                self.wait_lock.wait()

    def error(self, message):
        """Log an error; in non-interactive mode this exits the process."""
        self.ui.error(message)
        if not self.interactive:
            sys.exit(1)
| |
"""
sentry.tsdb.redis
~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import itertools
import logging
import operator
import random
import uuid
from binascii import crc32
from collections import defaultdict, namedtuple
from datetime import timedelta
from hashlib import md5
import six
from django.utils import timezone
from pkg_resources import resource_string
from redis.client import Script
from sentry.tsdb.base import BaseTSDB
from sentry.utils.dates import to_timestamp
from sentry.utils.redis import check_cluster_versions, get_cluster_from_options
from sentry.utils.versioning import Version
logger = logging.getLogger(__name__)

# depth/width: dimensions of the Count-Min sketch estimation matrix;
# capacity: size of the exact top-N index kept before the structure
# switches to probabilistic estimation.
SketchParameters = namedtuple('SketchParameters', 'depth width capacity')

# Lua implementation of the Count-Min sketch operations; registered with
# no client here and invoked against specific connections later.
CountMinScript = Script(
    None,
    resource_string('sentry', 'scripts/tsdb/cmsketch.lua'),
)
class RedisTSDB(BaseTSDB):
    """
    A time series storage backend for Redis.

    The time series API supports three data types:

        * simple counters
        * distinct counters (number of unique elements seen)
        * frequency tables (a set of items ranked by most frequently observed)

    The backend also supports virtual nodes (``vnodes``) which controls shard
    distribution. This value should be set to the anticipated maximum number of
    physical hosts and not modified after data has been written.

    Simple counters are stored in hashes. The key of the hash is composed of
    the model, epoch (which defines the start of the rollup period), and a
    shard identifier. This allows TTLs to be applied to the entire bucket,
    instead of having to be stored for every individual element in the rollup
    period. This results in a data layout that looks something like this::

        {
            "<model>:<epoch>:<shard id>": {
                "<key>": value,
                ...
            },
            ...
        }

    Distinct counters are stored using HyperLogLog, which provides a
    cardinality estimate with a standard error of 0.8%. The data layout looks
    something like this::

        {
            "<model>:<epoch>:<key>": value,
            ...
        }

    Frequency tables are modeled using two data structures:

        * top-N index: a sorted set containing the most frequently observed items,
        * estimation matrix: a hash table containing counters, used in a Count-Min sketch

    Member scores are 100% accurate until the index is filled (and no memory is
    used for the estimation matrix until this point), after which the data
    structure switches to a probabilistic implementation and accuracy begins to
    degrade for less frequently observed items, but remains accurate for more
    frequently observed items.

    Frequency tables are especially useful when paired with a (non-distinct)
    counter of the total number of observations so that scores of items of the
    frequency table can be displayed as percentages of the whole data set.

    (Additional documentation and the bulk of the logic for implementing the
    frequency table API can be found in the ``cmsketch.lua`` script.)
    """
    DEFAULT_SKETCH_PARAMETERS = SketchParameters(3, 128, 50)

    def __init__(self, prefix='ts:', vnodes=64, **options):
        # Pull the Redis cluster configuration out of the option dict;
        # remaining options are passed through to BaseTSDB.
        self.cluster, options = get_cluster_from_options('SENTRY_TSDB_OPTIONS', options)
        self.prefix = prefix
        self.vnodes = vnodes
        self.enable_frequency_sketches = options.pop('enable_frequency_sketches', False)
        super(RedisTSDB, self).__init__(**options)

    def validate(self):
        """Check that every host in the cluster runs a Redis version new
        enough for the features in use (sketches need 2.8.18+)."""
        logger.debug('Validating Redis version...')
        version = Version((2, 8, 18)) if self.enable_frequency_sketches else Version((2, 8, 9))
        check_cluster_versions(
            self.cluster,
            version,
            recommended=Version((2, 8, 18)),
            label='TSDB',
        )

    def make_key(self, model, rollup, timestamp, key):
        """
        Make a key that is used for distinct counter and frequency table
        values.
        """
        return '{prefix}{model}:{epoch}:{key}'.format(
            prefix=self.prefix,
            model=model.value,
            epoch=self.normalize_ts_to_rollup(timestamp, rollup),
            key=self.get_model_key(key),
        )

    def make_counter_key(self, model, epoch, model_key):
        """
        Make a key that is used for counter values.
        """
        # Shard counters across vnodes so hash buckets stay small
        if isinstance(model_key, six.integer_types):
            vnode = model_key % self.vnodes
        else:
            vnode = crc32(model_key) % self.vnodes
        return '{0}{1}:{2}:{3}'.format(self.prefix, model.value, epoch, vnode)

    def get_model_key(self, key):
        # We specialize integers so that a pure int-map can be optimized by
        # Redis, whereas long strings (say tag values) will store in a more
        # efficient hashed format.
        if not isinstance(key, six.integer_types):
            # enforce utf-8 encoding
            if isinstance(key, unicode):
                key = key.encode('utf-8')
            return md5(repr(key)).hexdigest()
        return key

    def incr(self, model, key, timestamp=None, count=1):
        """Increment a single simple counter."""
        self.incr_multi([(model, key)], timestamp, count)

    def incr_multi(self, items, timestamp=None, count=1):
        """
        Increment project ID=1 and group ID=5:

        >>> incr_multi([(TimeSeriesModel.project, 1), (TimeSeriesModel.group, 5)])
        """
        make_key = self.make_counter_key
        normalize_to_rollup = self.normalize_to_rollup
        if timestamp is None:
            timestamp = timezone.now()
        with self.cluster.map() as client:
            # One HINCRBY (plus TTL refresh) per (rollup, item) pair
            for rollup, max_values in self.rollups:
                norm_rollup = normalize_to_rollup(timestamp, rollup)
                for model, key in items:
                    model_key = self.get_model_key(key)
                    hash_key = make_key(model, norm_rollup, model_key)
                    client.hincrby(hash_key, model_key, count)
                    client.expireat(
                        hash_key,
                        self.calculate_expiry(rollup, max_values, timestamp),
                    )

    def get_range(self, model, keys, start, end, rollup=None):
        """
        To get a range of data for group ID=[1, 2, 3]:

        Start and end are both inclusive.

        >>> now = timezone.now()
        >>> get_keys(TimeSeriesModel.group, [1, 2, 3],
        >>>          start=now - timedelta(days=1),
        >>>          end=now)
        """
        normalize_to_epoch = self.normalize_to_epoch
        normalize_to_rollup = self.normalize_to_rollup
        make_key = self.make_counter_key
        if rollup is None:
            rollup = self.get_optimal_rollup(start, end)
        results = []
        timestamp = end
        with self.cluster.map() as client:
            # Walk backwards from end to start, one rollup interval at a
            # time, issuing pipelined HGETs (promises resolved on exit)
            while timestamp >= start:
                real_epoch = normalize_to_epoch(timestamp, rollup)
                norm_epoch = normalize_to_rollup(timestamp, rollup)
                for key in keys:
                    model_key = self.get_model_key(key)
                    hash_key = make_key(model, norm_epoch, model_key)
                    results.append((real_epoch, key,
                                    client.hget(hash_key, model_key)))
                timestamp = timestamp - timedelta(seconds=rollup)
        results_by_key = defaultdict(dict)
        for epoch, key, count in results:
            # Missing buckets count as zero
            results_by_key[key][epoch] = int(count.value or 0)
        for key, points in results_by_key.iteritems():
            results_by_key[key] = sorted(points.items())
        return dict(results_by_key)

    def record(self, model, key, values, timestamp=None):
        """Record occurrences of items in a single distinct counter."""
        self.record_multi(((model, key, values),), timestamp)

    def record_multi(self, items, timestamp=None):
        """
        Record an occurrence of an item in a distinct counter.
        """
        if timestamp is None:
            timestamp = timezone.now()
        ts = int(to_timestamp(timestamp))  # ``timestamp`` is not actually a timestamp :(
        with self.cluster.fanout() as client:
            for model, key, values in items:
                c = client.target_key(key)
                for rollup, max_values in self.rollups:
                    k = self.make_key(
                        model,
                        rollup,
                        ts,
                        key,
                    )
                    # HyperLogLog add + TTL refresh per rollup
                    c.pfadd(k, *values)
                    c.expireat(
                        k,
                        self.calculate_expiry(
                            rollup,
                            max_values,
                            timestamp,
                        ),
                    )

    def get_distinct_counts_series(self, model, keys, start, end=None, rollup=None):
        """
        Fetch counts of distinct items for each rollup interval within the range.
        """
        rollup, series = self.get_optimal_rollup_series(start, end, rollup)
        responses = {}
        with self.cluster.fanout() as client:
            for key in keys:
                c = client.target_key(key)
                r = responses[key] = []
                for timestamp in series:
                    r.append((
                        timestamp,
                        c.pfcount(
                            self.make_key(
                                model,
                                rollup,
                                timestamp,
                                key,
                            ),
                        ),
                    ))
        # Resolve the pipeline promises into plain values
        return {key: [(timestamp, promise.value) for timestamp, promise in value] for key, value in responses.iteritems()}

    def get_distinct_counts_totals(self, model, keys, start, end=None, rollup=None):
        """
        Count distinct items during a time range.
        """
        rollup, series = self.get_optimal_rollup_series(start, end, rollup)
        responses = {}
        with self.cluster.fanout() as client:
            for key in keys:
                # XXX: The current versions of the Redis driver don't implement
                # ``PFCOUNT`` correctly (although this is fixed in the Git
                # master, so should be available in the next release) and only
                # supports a single key argument -- not the variadic signature
                # supported by the protocol -- so we have to call the command
                # directly here instead.
                ks = []
                for timestamp in series:
                    ks.append(self.make_key(model, rollup, timestamp, key))
                responses[key] = client.target_key(key).execute_command('PFCOUNT', *ks)
        return {key: value.value for key, value in responses.iteritems()}

    def get_distinct_counts_union(self, model, keys, start, end=None, rollup=None):
        """Count distinct items across *all* keys over the range by
        merging HyperLogLogs per host and then reducing on one host."""
        if not keys:
            return 0
        rollup, series = self.get_optimal_rollup_series(start, end, rollup)
        # Unique namespace for the temporary merge keys of this call
        temporary_id = uuid.uuid1().hex

        def make_temporary_key(key):
            return '{}{}:{}'.format(self.prefix, temporary_id, key)

        def expand_key(key):
            """
            Return a list containing all keys for each interval in the series for a key.
            """
            return [self.make_key(model, rollup, timestamp, key) for timestamp in series]

        router = self.cluster.get_router()

        def map_key_to_host(hosts, key):
            """
            Identify the host where a key is located and add it to the host map.
            """
            hosts[router.get_host_for_key(key)].add(key)
            return hosts

        def get_partition_aggregate((host, keys)):
            """
            Fetch the HyperLogLog value (in its raw byte representation) that
            results from merging all HyperLogLogs at the provided keys.
            """
            destination = make_temporary_key('p:{}'.format(host))
            client = self.cluster.get_local_client(host)
            with client.pipeline(transaction=False) as pipeline:
                pipeline.execute_command(
                    'PFMERGE',
                    destination,
                    *itertools.chain.from_iterable(
                        map(expand_key, keys)
                    )
                )
                pipeline.get(destination)
                pipeline.delete(destination)
                return (host, pipeline.execute()[1])

        def merge_aggregates(values):
            """
            Calculate the cardinality of the provided HyperLogLog values.
            """
            destination = make_temporary_key('a')  # all values will be merged into this key
            aggregates = {make_temporary_key('a:{}'.format(host)): value for host, value in values}
            # Choose a random host to execute the reduction on. (We use a host
            # here that we've already accessed as part of this process -- this
            # way, we constrain the choices to only hosts that we know are
            # running.)
            client = self.cluster.get_local_client(random.choice(values)[0])
            with client.pipeline(transaction=False) as pipeline:
                pipeline.mset(aggregates)
                pipeline.execute_command('PFMERGE', destination, *aggregates.keys())
                pipeline.execute_command('PFCOUNT', destination)
                pipeline.delete(destination, *aggregates.keys())
                return pipeline.execute()[2]

        # TODO: This could be optimized to skip the intermediate step for the
        # host that has the largest number of keys if the final merge and count
        # is performed on that host. If that host contains *all* keys, the
        # final reduction could be performed as a single PFCOUNT, skipping the
        # MSET and PFMERGE operations entirely.
        return merge_aggregates(
            map(
                get_partition_aggregate,
                reduce(
                    map_key_to_host,
                    keys,
                    defaultdict(set),
                ).items(),
            )
        )

    def make_frequency_table_keys(self, model, rollup, timestamp, key):
        """Return the three keys backing one frequency table: configuration
        (:c), top-N index (:i) and estimation matrix (:e)."""
        prefix = self.make_key(model, rollup, timestamp, key)
        return map(
            operator.methodcaller('format', prefix),
            ('{}:c', '{}:i', '{}:e'),
        )

    def record_frequency_multi(self, requests, timestamp=None):
        """Record observations in frequency tables; no-op when sketches
        are disabled."""
        if not self.enable_frequency_sketches:
            return
        if timestamp is None:
            timestamp = timezone.now()
        ts = int(to_timestamp(timestamp))  # ``timestamp`` is not actually a timestamp :(
        commands = {}
        for model, request in requests:
            for key, items in request.iteritems():
                keys = []
                expirations = {}
                # Figure out all of the keys we need to be incrementing, as
                # well as their expiration policies.
                for rollup, max_values in self.rollups:
                    chunk = self.make_frequency_table_keys(model, rollup, ts, key)
                    keys.extend(chunk)
                    expiry = self.calculate_expiry(rollup, max_values, timestamp)
                    for k in chunk:
                        expirations[k] = expiry
                arguments = ['INCR'] + list(self.DEFAULT_SKETCH_PARAMETERS)
                for member, score in items.items():
                    arguments.extend((score, member))
                # Since we're essentially merging dictionaries, we need to
                # append this to any value that already exists at the key.
                cmds = commands.setdefault(key, [])
                cmds.append((CountMinScript, keys, arguments))
                for k, t in expirations.items():
                    cmds.append(('EXPIREAT', k, t))
        self.cluster.execute_commands(commands)

    def get_most_frequent(self, model, keys, start, end=None, rollup=None, limit=None):
        """Return the top-ranked (member, score) pairs per key over the
        whole range."""
        if not self.enable_frequency_sketches:
            raise NotImplementedError("Frequency sketches are disabled.")
        rollup, series = self.get_optimal_rollup_series(start, end, rollup)
        arguments = ['RANKED']
        if limit is not None:
            arguments.append(int(limit))
        commands = {}
        for key in keys:
            # One script invocation spanning all intervals in the series
            ks = []
            for timestamp in series:
                ks.extend(self.make_frequency_table_keys(model, rollup, timestamp, key))
            commands[key] = [(CountMinScript, ks, arguments)]
        results = {}
        for key, responses in self.cluster.execute_commands(commands).items():
            results[key] = [(member, float(score)) for member, score in responses[0].value]
        return results

    def get_most_frequent_series(self, model, keys, start, end=None, rollup=None, limit=None):
        """Return, per key, a series of (timestamp, {member: score})
        rankings, one entry per rollup interval."""
        if not self.enable_frequency_sketches:
            raise NotImplementedError("Frequency sketches are disabled.")
        rollup, series = self.get_optimal_rollup_series(start, end, rollup)
        arguments = ['RANKED']
        if limit is not None:
            arguments.append(int(limit))
        commands = {}
        for key in keys:
            # One script invocation per interval, so results stay per-bucket
            commands[key] = [(
                CountMinScript,
                self.make_frequency_table_keys(model, rollup, timestamp, key),
                arguments,
            ) for timestamp in series]

        def unpack_response(response):
            return {item: float(score) for item, score in response.value}

        results = {}
        for key, responses in self.cluster.execute_commands(commands).items():
            results[key] = zip(series, map(unpack_response, responses))
        return results

    def get_frequency_series(self, model, items, start, end=None, rollup=None):
        """Estimate scores for the requested members per rollup interval;
        returns {key: [(timestamp, {member: score})]}."""
        if not self.enable_frequency_sketches:
            raise NotImplementedError("Frequency sketches are disabled.")
        rollup, series = self.get_optimal_rollup_series(start, end, rollup)
        # Freeze ordering of the members (we'll need these later.)
        for key, members in items.items():
            items[key] = tuple(members)
        commands = {}
        for key, members in items.items():
            ks = []
            for timestamp in series:
                ks.extend(self.make_frequency_table_keys(model, rollup, timestamp, key))
            commands[key] = [(CountMinScript, ks, ('ESTIMATE',) + members)]
        results = {}
        for key, responses in self.cluster.execute_commands(commands).items():
            members = items[key]
            chunk = results[key] = []
            for timestamp, scores in zip(series, responses[0].value):
                chunk.append((timestamp, dict(zip(members, map(float, scores)))))
        return results

    def get_frequency_totals(self, model, items, start, end=None, rollup=None):
        """Sum the estimated member scores over the whole range; returns
        {key: {member: total score}}."""
        if not self.enable_frequency_sketches:
            raise NotImplementedError("Frequency sketches are disabled.")
        responses = {}
        for key, series in self.get_frequency_series(model, items, start, end, rollup).iteritems():
            response = responses[key] = {}
            for timestamp, results in series:
                for member, value in results.items():
                    response[member] = response.get(member, 0.0) + value
        return responses
| |
#! /usr/bin/env python
#from exception import exception
import re
"""
Python class that parses a perftable file from GoldenGate
see 'help(perfTableParser.perfTableParser)' for details
"""
class localException(Exception):
    """Internal exception type used to abort parsing with a message."""
    pass
class perfTableParser:
    """
    Parser for GoldenGate performance ('perf table') files.

    Produces a structure holding the parsed performances
    (@TODO: result class still to be defined).
    """
    # Regular expression used to parse variable names - see 'processDeclarationName'
    re_varName = re.compile(r"([a-zA-Z0-9_][a-zA-Z0-9_@<>]*)(\([a-zA-Z0-9_][a-zA-Z0-9_@<>]*\))?$")

    def parseFile(self, parent, fileName):
        """
        Parse the file whose path is given as argument and update 'parent'.
        Returns True on success, False otherwise (see self.valid).
        """
        # Reset all parser state
        self.i__init()
        self.parent = parent
        try:
            fHandle = open(fileName, 'r')
        except:
            self.errorMsg = "Unable to open file '" + fileName + "'"
            # NOTE(review): 'exception' is undefined here (its import at the
            # top of the file is commented out), so this raises NameError
            # instead of the intended error; probably meant localException.
            raise exception(self.errorMsg)
        try:
            # Internal parsing routine
            self.i__parseFile(fHandle)
        except localException ,e:
            print "parsing failed at line ", self.lineNumber
            print e
        except ValueError, e:
            print "parsing failed at line ", self.lineNumber
            print e
#        except Exception, e :
#            print " INTERNAL[perfTableParser] : unknown exception found; please report bug"
#            print e
#            #raise e
        else:
            # Parsing seems OK
            self.valid = True
        fHandle.close()
        return self.valid

    def parseClipboard(self, parent, TextObject):
        """
        Parse the text given as argument and update 'parent'.
        Returns True on success, False otherwise (see self.valid).
        """
        # Reset all parser state
        self.i__init()
        self.parent = parent
        try:
            # Internal parsing routine
            self.i__parseText(TextObject)
        except localException ,e:
            print "parsing failed at line ", self.lineNumber
            print e
        except ValueError, e:
            print "parsing failed at line ", self.lineNumber
            print e
#        except Exception, e :
#            print " INTERNAL[perfTableParser] : unknown exception found; please report bug"
#            print e
#            #raise e
        else:
            # Parsing seems OK
            self.valid = True
        return self.valid

    def i__init(self):
        """
        Initializes the parsing - called by self.parse
        """
        # Current line number
        self.lineNumber = 0
        # List of variables: [(name, type, size), ...]
        self.variablesList = None
        # Number of data values expected per line
        self.datasPerLine = 0
        # Whether parsing succeeded
        self.valid = False
        # Error message
        self.errorMsg = "?"
        # Final array of parsed rows
        self.myArray = []
        # Specification (main sweep) variable name, if any
        self.specVarName = None
        # Performance names
        self.performancesNames =[]

    def i__parseFile(self, fHandle):
        """
        Internal parsing routine called by self.parseFile; raises exceptions on error.
        """
        numbersParse = False # True once we are parsing the data lines
        lastCommentLine = "%" # Text of the last comment line parsed
        for line in fHandle:
            self.lineNumber = self.lineNumber+1
            # Strip redundant spaces/tabs and end-of-line characters
            line = self.cleanUpLine(line)
            if 0 <> len( line):
                if numbersParse:
                    # Data line
                    self.processDataLine(line)
                else:
                    # 'Comment' line
                    if self.processCommentLine(line):
                        lastCommentLine = line
                    else:
                        # Data parsing starts here
                        numbersParse = True
                        # The last comment line holds the variable names
                        self.processDeclarationLine(lastCommentLine)
                        # Parse the first data line
                        self.processDataLine(line)

    def i__parseText(self, TextObject):
        """
        Internal parsing routine called by self.parseText; raises exceptions on error.
        """
        numbersParse = False # True once we are parsing the data lines
        for line in TextObject.split('\n'):
            self.lineNumber = self.lineNumber+1
            # Strip redundant spaces/tabs and end-of-line characters
            line = self.cleanUpLine(line)
            if 0 <> len( line):
                if numbersParse:
                    # Data line
                    self.processDataLine(line)
                else:
                    # NOTE(review): unlike i__parseFile, the first non-empty
                    # line is treated directly as the declaration line here
                    # (no '%' comment handling) -- confirm this is intended.
                    resultList = []
                    tokens = line.split()
                    for token in tokens:
                        myTupple = self.processDeclarationName(token)
                        resultList.append(myTupple)
                    self.variablesList = resultList
                    # Now compute the number of data values expected per line
                    finalSize = 0
                    for elt in resultList:
                        finalSize = finalSize + elt[2]
                    self.datasPerLine = finalSize
                    # Data parsing starts here
                    numbersParse = True
##        # Rebuild the list of inputs
##        # Special case without a main sweep
##        if None != self.specVarName:
##            self.sweepVarNames.append(self.specVarName )

    def cleanUpLine(self, line):
        """
        Remove whitespace and end-of-line characters at the beginning and at
        the end of the line; collapse duplicate spaces.
        """
        line = line.expandtabs()
        line = line.strip(None)
        # Remove duplicate space
        line = ' '.join(line.split())
        return line

    def processCommentLine(self, line):
        """
        Parses the 'comment' line at the beginning of the file
        returns True if it matches, else False
        We also try to detect the sweep variables names
        """
        if '%' <> line[0]:
            return False
        if line.startswith('% specification_variable'):
            tmp = line.partition('% specification_variable')
            tmpName = tmp[2].strip()
            # Special case where no main sweep is defined
            if "<notDefined>" != tmpName:
                self.specVarName = tmpName
        elif line.startswith('% sweep_variables'):
            tmp = line.partition('% sweep_variables')
            tmpName = tmp[2]
            # Special case where no secondary sweep is defined
            if " <notDefined>" != tmpName:
                self.sweepVarNames = tmpName.split()
            else:
                self.sweepVarNames = []
        elif line.startswith('%performance'):
            tmp = line.split()
            self.performancesNames.append(tmp[1])
        return True

    def processDeclarationLine(self, line):
        """
        Processes the line that declares the variables, i.e.
        % PIN PAE_c(RI) ACPR_left ACPR_right Pin Pout Pdc
        % nom1 nom2 nom3(RI) nom4
        If a name has no parenthesised suffix it is a float; with a
        parenthesised suffix only '(RI)' (real / imaginary) is expected.
        Returns an array of tupples : [ (name, type, size), ...]
        """
        resultList = []
        tokens = line.split()
        if '%' <> tokens.pop(0):
            # NOTE(review): 'throw' is not defined in Python -- this raises
            # NameError instead of the intended parse error; probably meant
            # 'raise localException(...)'.
            throw( "'%' expected at the begining of the variables declaration")
        for token in tokens:
            myTupple = self.processDeclarationName(token)
            resultList.append(myTupple)
        self.variablesList = resultList
        # Now compute the number of data values expected per line
        finalSize = 0
        for elt in resultList:
            finalSize = finalSize + elt[2]
        self.datasPerLine = finalSize
        return

    def processDeclarationName(self, name):
        """
        Handles one name from the variable declaration line.
        Ex: "PIN" "PAE_c(RI)" ...
        Returns a tupple : (name, type, size)
        where:
        'name' is the base name
        'type' is the corresponding python type
        'size' is the number of corresponding numbers of datas in the result array
        """
        # Inspect each name and decide whether it is a real or a complex
        myMatch = self.re_varName.match(name)
        if None == myMatch:
            raise localException( "Un-recognized variable declaration : '" + str(name) + "'" )
        varName = myMatch.group(1)
        myExtension = myMatch.group(2)
        if None == myExtension:
            myType = float
            mySize = 1
        elif "(RI)" == myExtension:
            myType = complex
            mySize = 2
        else:
            raise localException("Sorry, type '"+myExtension+"' is not supported")
        return varName, myType, mySize

    def processDataLine(self, line):
        """
        Processes a line of datas
        Checks that there is the right number of elements and that they are all floats
        Returns a list of values corresponding to the variable types
        """
        tokens = line.split()
        if len(tokens) < self.datasPerLine:
            raise localException( str(self.datasPerLine) + " values were expected, but I found " + str( len(tokens)) )
        myList = []
        for myTupple in self.variablesList:
            lType = myTupple[1]
            lSize = myTupple[2]
            lArray = []
            for i in range(lSize):
                tmp = tokens.pop(0)
                if tmp == '?':
                    # Convert '?' into 2
                    myFloat = float('2')
                    lArray.append(myFloat)
                    # NOTE(review): the value is built and appended inside the
                    # per-component loop, so a multi-component '(RI)' variable
                    # containing '?' appends more than one entry for a single
                    # variable -- confirm whether this is intended.
                    myNumber = lType(*lArray)
                    myList.append(myNumber)
                elif tmp != 'X' and tmp != 'x':
                    # This will throw an error if data is not a float
                    myFloat = float(tmp)
                    lArray.append(myFloat)
                    # Convert to the declared type
                    myNumber = lType(*lArray)
                    myList.append(myNumber)
                else:
                    # 'X'/'x' placeholders are stored as the literal 'X'
                    myList.append(tmp.upper())
        self.myArray.append(tuple(myList) )
        return

    def getSweepsNames(self):
        # Accessor: names of the secondary sweep variables.
        # NOTE(review): self.sweepVarNames is only assigned when a
        # '% sweep_variables' comment was parsed; otherwise AttributeError.
        return self.sweepVarNames

    def getPerfsNames(self):
        # Accessor: names of the declared performances.
        return self.performancesNames
################################################################################
| |
import os
from pathlib import Path
import random
from shutil import copytree, rmtree, make_archive
import string
import sys
from filecmp import dircmp
import uuid
import pytest
from ray.ray_constants import KV_NAMESPACE_PACKAGE
from ray.experimental.internal_kv import _internal_kv_del, _internal_kv_exists
from ray._private.runtime_env.packaging import (
_dir_travel,
get_local_dir_from_uri,
get_uri_for_directory,
_get_excludes,
upload_package_if_needed,
parse_uri,
Protocol,
get_top_level_dir_from_compressed_package,
remove_dir_from_filepaths,
unzip_package,
)
TOP_LEVEL_DIR_NAME = "top_level"
ARCHIVE_NAME = "archive.zip"
def random_string(size: int = 10):
    """Return a string of ``size`` random uppercase ASCII letters."""
    letters = [random.choice(string.ascii_uppercase) for _ in range(size)]
    return "".join(letters)
@pytest.fixture
def random_dir(tmp_path):
    """Yield tmp_path populated with an empty 'subdir' and 20 random files."""
    (tmp_path / "subdir").mkdir()
    for _ in range(10):
        # One 100-char and one 200-char file per round, with random names.
        for content_size in (100, 200):
            target = tmp_path / random_string(10)
            with target.open("w") as handle:
                handle.write(random_string(content_size))
    yield tmp_path
@pytest.fixture
def random_zip_file_without_top_level_dir(random_dir):
    """Yield the path to a zip whose entries sit directly at the archive root."""
    base_name = random_dir / ARCHIVE_NAME[: ARCHIVE_NAME.rfind(".")]
    make_archive(base_name, "zip", random_dir)
    yield str(random_dir / ARCHIVE_NAME)
@pytest.fixture
def random_zip_file_with_top_level_dir(tmp_path):
    """Yield the path to a zip containing TOP_LEVEL_DIR_NAME with nested random content."""
    root = tmp_path / TOP_LEVEL_DIR_NAME
    root.mkdir(parents=True)
    current = root
    for _ in range(10):
        # Two random files per level (100 and 200 chars of content).
        for content_size in (100, 200):
            with (current / random_string(10)).open("w") as handle:
                handle.write(random_string(content_size))
        # Two random subdirectories; descend into the second one.
        sibling = current / random_string(15)
        sibling.mkdir(parents=True)
        deeper = current / random_string(15)
        deeper.mkdir(parents=True)
        current = deeper
    make_archive(
        tmp_path / ARCHIVE_NAME[: ARCHIVE_NAME.rfind(".")],
        "zip",
        tmp_path,
        TOP_LEVEL_DIR_NAME,
    )
    yield str(tmp_path / ARCHIVE_NAME)
class TestGetURIForDirectory:
    """Tests for get_uri_for_directory: hashing a directory tree into a content URI."""

    def test_invalid_directory(self):
        # Nonexistent absolute and relative paths must both be rejected.
        with pytest.raises(ValueError):
            get_uri_for_directory("/does/not/exist")
        with pytest.raises(ValueError):
            get_uri_for_directory("does/not/exist")

    def test_determinism(self, random_dir):
        # Check that it's deterministic for same data.
        uris = {get_uri_for_directory(random_dir) for _ in range(10)}
        assert len(uris) == 1
        # Add one file, should be different now.
        with open(random_dir / f"test_{random_string()}", "w") as f:
            f.write(random_string())
        assert {get_uri_for_directory(random_dir)} != uris

    def test_relative_paths(self, random_dir):
        # Check that relative or absolute paths result in the same URI.
        p = Path(random_dir)
        relative_uri = get_uri_for_directory(os.path.relpath(p))
        absolute_uri = get_uri_for_directory(p.resolve())
        assert relative_uri == absolute_uri

    def test_excludes(self, random_dir):
        # Excluding a directory should modify the URI.
        included_uri = get_uri_for_directory(random_dir)
        excluded_uri = get_uri_for_directory(random_dir, excludes=["subdir"])
        assert included_uri != excluded_uri
        # Excluding a directory should be the same as deleting it.
        rmtree((Path(random_dir) / "subdir").resolve())
        deleted_uri = get_uri_for_directory(random_dir)
        assert deleted_uri == excluded_uri

    def test_empty_directory(self):
        # Two distinct empty directories hash to the same URI; the dirs are
        # created in the CWD, so clean them up even if the assert fails.
        try:
            os.mkdir("d1")
            os.mkdir("d2")
            assert get_uri_for_directory("d1") == get_uri_for_directory("d2")
        finally:
            os.rmdir("d1")
            os.rmdir("d2")

    def test_uri_hash_length(self, random_dir):
        # The trailing hex digest (before ".zip") is 16 characters long.
        uri = get_uri_for_directory(random_dir)
        hex_hash = uri.split("_")[-1][: -len(".zip")]
        assert len(hex_hash) == 16
class TestUploadPackageIfNeeded:
    """upload_package_if_needed should upload once, then skip while the URI is cached."""

    def test_create_upload_once(self, tmp_path, random_dir, ray_start_regular):
        uri = get_uri_for_directory(random_dir)
        # First call: URI not yet in the KV store -> the package is uploaded.
        uploaded = upload_package_if_needed(uri, tmp_path, random_dir)
        assert uploaded
        assert _internal_kv_exists(uri, namespace=KV_NAMESPACE_PACKAGE)
        # Second call: already present -> no re-upload.
        uploaded = upload_package_if_needed(uri, tmp_path, random_dir)
        assert not uploaded
        assert _internal_kv_exists(uri, namespace=KV_NAMESPACE_PACKAGE)
        # Delete the URI from the internal_kv. This should trigger re-upload.
        _internal_kv_del(uri, namespace=KV_NAMESPACE_PACKAGE)
        assert not _internal_kv_exists(uri, namespace=KV_NAMESPACE_PACKAGE)
        uploaded = upload_package_if_needed(uri, tmp_path, random_dir)
        assert uploaded
class TestGetTopLevelDirFromCompressedPackage:
    """Detecting whether a zip archive wraps everything in one top-level directory."""

    def test_get_top_level_valid(self, random_zip_file_with_top_level_dir):
        # A zip built around TOP_LEVEL_DIR_NAME should report that directory.
        result = get_top_level_dir_from_compressed_package(
            str(random_zip_file_with_top_level_dir)
        )
        assert result == TOP_LEVEL_DIR_NAME

    def test_get_top_level_invalid(self, random_zip_file_without_top_level_dir):
        # A zip whose entries live at the archive root has no top-level dir.
        result = get_top_level_dir_from_compressed_package(
            str(random_zip_file_without_top_level_dir)
        )
        assert result is None
class TestRemoveDirFromFilepaths:
    """remove_dir_from_filepaths should hoist a directory's contents into its parent."""

    def test_valid_removal(self, random_zip_file_with_top_level_dir):
        # This test copies the TOP_LEVEL_DIR_NAME directory, and then it
        # shifts the contents of the copied directory into the base tmp_path
        # directory. Then it compares the contents of tmp_path with the
        # TOP_LEVEL_DIR_NAME directory to ensure that they match.
        archive_path = random_zip_file_with_top_level_dir
        tmp_path = archive_path[: archive_path.rfind(os.path.sep)]
        original_dir_path = os.path.join(tmp_path, TOP_LEVEL_DIR_NAME)
        copy_dir_path = os.path.join(tmp_path, TOP_LEVEL_DIR_NAME + "_copy")
        copytree(original_dir_path, copy_dir_path)
        remove_dir_from_filepaths(tmp_path, TOP_LEVEL_DIR_NAME + "_copy")
        dcmp = dircmp(tmp_path, os.path.join(tmp_path, TOP_LEVEL_DIR_NAME))
        # Since this test uses the tmp_path as the target directory, and since
        # the tmp_path also contains the zip file and the top level directory,
        # make sure that the only difference between the tmp_path's contents
        # and the top level directory's contents are the zip file from the
        # Pytest fixture and the top level directory itself. This implies that
        # all files have been extracted from the top level directory and moved
        # into the tmp_path.
        assert set(dcmp.left_only) == {ARCHIVE_NAME, TOP_LEVEL_DIR_NAME}
        # Make sure that all the subdirectories and files have been moved to
        # the target directory
        assert len(dcmp.right_only) == 0
@pytest.mark.parametrize("remove_top_level_directory", [False, True])
@pytest.mark.parametrize("unlink_zip", [False, True])
class TestUnzipPackage:
    """unzip_package extraction, with/without top-level-dir stripping and zip removal."""

    def dcmp_helper(
        self, remove_top_level_directory, unlink_zip, tmp_subdir, tmp_path, archive_path
    ):
        # Compare the extraction target against the original top-level dir,
        # accounting for whether the top-level directory was stripped.
        dcmp = None
        if remove_top_level_directory:
            dcmp = dircmp(tmp_subdir, os.path.join(tmp_path, TOP_LEVEL_DIR_NAME))
        else:
            dcmp = dircmp(
                os.path.join(tmp_subdir, TOP_LEVEL_DIR_NAME),
                os.path.join(tmp_path, TOP_LEVEL_DIR_NAME),
            )
        assert len(dcmp.left_only) == 0
        assert len(dcmp.right_only) == 0
        # The zip itself must be gone iff unlink_zip was requested.
        if unlink_zip:
            assert not Path(archive_path).is_file()
        else:
            assert Path(archive_path).is_file()

    def test_unzip_package(
        self, random_zip_file_with_top_level_dir, remove_top_level_directory, unlink_zip
    ):
        # Extract the fixture archive into <tmp>/<TOP_LEVEL_DIR_NAME>_tmp and
        # verify the result matches the original directory tree.
        archive_path = random_zip_file_with_top_level_dir
        tmp_path = archive_path[: archive_path.rfind(os.path.sep)]
        tmp_subdir = os.path.join(tmp_path, TOP_LEVEL_DIR_NAME + "_tmp")
        unzip_package(
            package_path=archive_path,
            target_dir=tmp_subdir,
            remove_top_level_directory=remove_top_level_directory,
            unlink_zip=unlink_zip,
        )
        self.dcmp_helper(
            remove_top_level_directory, unlink_zip, tmp_subdir, tmp_path, archive_path
        )

    def test_unzip_with_matching_subdirectory_names(
        self,
        remove_top_level_directory,
        unlink_zip,
        tmp_path,
    ):
        # Build a zip whose nested directories all share TOP_LEVEL_DIR_NAME to
        # check that stripping only removes the outermost directory.
        path = tmp_path
        top_level_dir = path / TOP_LEVEL_DIR_NAME
        top_level_dir.mkdir(parents=True)
        next_level_dir = top_level_dir
        for _ in range(10):
            dir1 = next_level_dir / TOP_LEVEL_DIR_NAME
            dir1.mkdir(parents=True)
            next_level_dir = dir1
        make_archive(
            path / ARCHIVE_NAME[: ARCHIVE_NAME.rfind(".")],
            "zip",
            path,
            TOP_LEVEL_DIR_NAME,
        )
        archive_path = str(path / ARCHIVE_NAME)
        tmp_path = archive_path[: archive_path.rfind(os.path.sep)]
        tmp_subdir = os.path.join(tmp_path, TOP_LEVEL_DIR_NAME + "_tmp")
        unzip_package(
            package_path=archive_path,
            target_dir=tmp_subdir,
            remove_top_level_directory=remove_top_level_directory,
            unlink_zip=unlink_zip,
        )
        self.dcmp_helper(
            remove_top_level_directory,
            unlink_zip,
            tmp_subdir,
            tmp_path,
            archive_path,
        )
@pytest.mark.skipif(sys.platform == "win32", reason="Fails on windows")
def test_travel(tmp_path):
    """Randomized check that _dir_travel visits exactly the non-excluded entries."""
    dir_paths = set()
    file_paths = set()
    item_num = 0
    excludes = []
    root = tmp_path / "test"

    def construct(path, excluded=False, depth=0):
        # Recursively build a random tree, recording which directories and
        # files SHOULD be visited (i.e. are not covered by an exclude rule).
        nonlocal item_num
        path.mkdir(parents=True)
        if not excluded:
            dir_paths.add(str(path))
        # Bound recursion depth and total item count to keep the test fast.
        if depth > 8:
            return
        if item_num > 500:
            return
        dir_num = random.randint(0, 10)
        file_num = random.randint(0, 10)
        for _ in range(dir_num):
            uid = str(uuid.uuid4()).split("-")[0]
            dir_path = path / uid
            # ~1/6 of subdirectories are marked excluded.
            exclud_sub = random.randint(0, 5) == 0
            if not excluded and exclud_sub:
                excludes.append(str(dir_path.relative_to(root)))
            if not excluded:
                construct(dir_path, exclud_sub or excluded, depth + 1)
            item_num += 1
        if item_num > 1000:
            return
        for _ in range(file_num):
            uid = str(uuid.uuid4()).split("-")[0]
            v = random.randint(0, 1000)
            with (path / uid).open("w") as f:
                f.write(str(v))
            if not excluded:
                # ~1/6 of files are excluded; the rest are expected visits.
                if random.randint(0, 5) == 0:
                    excludes.append(str((path / uid).relative_to(root)))
                else:
                    file_paths.add((str(path / uid), str(v)))
            item_num += 1

    construct(root)
    exclude_spec = _get_excludes(root, excludes)
    visited_dir_paths = set()
    visited_file_paths = set()

    def handler(path):
        # Record every path the traversal hands us (dirs, and file contents).
        if path.is_dir():
            visited_dir_paths.add(str(path))
        else:
            with open(path) as f:
                visited_file_paths.add((str(path), f.read()))

    _dir_travel(root, [exclude_spec], handler)
    assert file_paths == visited_file_paths
    assert dir_paths == visited_dir_paths
@pytest.mark.parametrize(
    "parsing_tuple",
    [
        ("gcs://file.zip", Protocol.GCS, "file.zip"),
        ("s3://bucket/file.zip", Protocol.S3, "s3_bucket_file.zip"),
        ("https://test.com/file.zip", Protocol.HTTPS, "https_test_com_file.zip"),
        ("gs://bucket/file.zip", Protocol.GS, "gs_bucket_file.zip"),
    ],
)
def test_parsing(parsing_tuple):
    """parse_uri splits a URI into its protocol and a flattened package name."""
    uri, expected_protocol, expected_name = parsing_tuple
    actual_protocol, actual_name = parse_uri(uri)
    assert actual_protocol == expected_protocol
    assert actual_name == expected_name
def test_get_local_dir_from_uri():
    """The local cache dir is base_dir joined with the URI's stem (no '.zip')."""
    uri = "gcs://<working_dir_content_hash>.zip"
    expected = Path("base_dir/<working_dir_content_hash>")
    assert get_local_dir_from_uri(uri, "base_dir") == expected
if __name__ == "__main__":
    # Allow running this test module directly; -sv = no capture, verbose.
    sys.exit(pytest.main(["-sv", __file__]))
| |
import os
import sys
import web
import simplejson
import utils
from __exceptions__ import formattedException
'''
Requires: web.py --> http://webpy.org/
'''
import threading
__version__ = '1.0.0'
import logging
from logging import handlers
__PROGNAME__ = os.path.splitext(os.path.basename(sys.argv[0]))[0]
LOG_FILENAME = os.sep.join([os.path.dirname(sys.argv[0]),'%s.log' % (__PROGNAME__)])
class MyTimedRotatingFileHandler(handlers.TimedRotatingFileHandler):
    """Time-based rotating file handler with an additional size cap.

    Behaves exactly like ``TimedRotatingFileHandler`` but also triggers a
    rollover when appending the next record would push the log file past
    ``maxBytes`` (0 disables the size check), mirroring the semantics of
    ``RotatingFileHandler``.
    """

    def __init__(self, filename, maxBytes=0, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False):
        handlers.TimedRotatingFileHandler.__init__(self, filename=filename, when=when, interval=interval, backupCount=backupCount, encoding=encoding, delay=delay, utc=utc)
        # Extra size threshold on top of the time-based rollover.
        self.maxBytes = maxBytes

    def shouldRollover(self, record):
        """Return a truthy value when either the time or size threshold is hit."""
        response = handlers.TimedRotatingFileHandler.shouldRollover(self, record)
        if (response == 0):
            if self.stream is None:  # delay was set: open the stream lazily
                self.stream = self._open()
            if self.maxBytes > 0:  # size-based rollover enabled?
                msg = "%s\n" % self.format(record)
                try:
                    # Seek to EOF first: on Windows the position is not
                    # guaranteed to be at the end (non-POSIX behaviour).
                    self.stream.seek(0, 2)
                    if self.stream.tell() + len(msg) >= self.maxBytes:
                        return 1
                except Exception:
                    # Best-effort size probe: keep logging even if the stream
                    # cannot be sized. Unlike the previous bare `except:`, this
                    # no longer swallows SystemExit/KeyboardInterrupt.
                    pass
            return 0
        return response
# --- Logging setup: a plain file handler plus a console echo, both DEBUG. ---
logger = logging.getLogger(__PROGNAME__)
handler = logging.FileHandler(LOG_FILENAME)
# Alternative rotating handlers, kept here for reference:
#handler = handlers.TimedRotatingFileHandler(LOG_FILENAME, when='d', interval=1, backupCount=30, encoding=None, delay=False, utc=False)
#handler = MyTimedRotatingFileHandler(LOG_FILENAME, maxBytes=1000000, when='d', backupCount=30)
#handler = handlers.RotatingFileHandler(LOG_FILENAME, maxBytes=1000000, backupCount=30, encoding=None, delay=False)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
print 'Logging to "%s".' % (handler.baseFilename)
# Console (stderr) echo of all records with a shorter format.
ch = logging.StreamHandler()
ch_format = logging.Formatter('%(asctime)s - %(message)s')
ch.setFormatter(ch_format)
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
logging.getLogger().setLevel(logging.DEBUG)
# URL routing table for the web.py application (pattern -> handler class).
urls = (
    '/', 'Index',
    '/nsca/(.+)', 'NSCAHelper',
    '/nsca', 'NSCAHelper',
    '/setwindowsagentaddr', 'Nothing',
    '/setwindowsagentaddr/', 'Nothing',
)
### Templates
render = web.template.render('templates', base='base')
web.template.Template.globals.update(dict(
    datestr = web.datestr,
    render = render
))
def notfound():
    # Custom 404 body for unmatched routes (installed via app.notfound below).
    message = "Sorry, the page you were looking for was not found. This message may be seen whenever someone tries to issue a negative number as part of the REST URL Signature and this is just not allowed at this time."
    return web.notfound(message)
__index__ = '''
<html>
<head>
<title>(c). Copyright 2013, AT&T, All Rights Reserved.</title>
<style>
#menu {
width: 200px;
float: left;
}
</style>
</head>
<body>
<ul id="menu">
<li><a href="/">Home</a></li>
</ul>
<p><b>UNAUTHORIZED ACCESS</b></p>
</body>
</html>
'''
class Index:
    """Root route: serves the static landing page."""

    def GET(self):
        """Return the static landing page HTML."""
        # Dropped the unused local `s = '%s %s' % (__PROGNAME__, __version__)`
        # (dead code: computed but never used).
        return __index__
class Nothing:
    """No-op endpoint: acknowledges the POST with the static landing page."""

    def POST(self):
        web.header('Content-Type', 'text/html')
        body = __index__
        return body
__username__ = 'nscahelper'
__password__ = utils.md5('peekab00')
class NSCAHelper:
    """REST endpoint driving local nagios config updates and send_nsca calls."""

    def GET(self):
        # Plain GET: just serve the static landing page.
        web.header('Content-Type', 'text/html')
        return __index__

    def POST(self,uri):
        '''
        Dispatch on the sub-URI; example payloads:

        /nsca/nagios/update/config
        { "oper":"login",
          "username":"nscahelper",
          "password":"103136174d231aabe1de8feaf9afc92f",
          "target":"nagios.cfg",
          "cfg":"remote1_nagios2",
          "service1": { "use":"generic-service",
                        "host_name":"remote1",
                        "service_description":"DISK_1",
                        "active_checks_enabled":"0",
                        "passive_checks_enabled":"1",
                        "check_command":"dummy_command2"
                        },
          "service2": { "use":"generic-service",
                        "host_name":"remote1",
                        "service_description":"DISK_2",
                        "active_checks_enabled":"0",
                        "passive_checks_enabled":"1",
                        "check_command":"dummy_command2"
                        }
        }

        /nsca/nagios/send/nsca
        { "oper":"login",
          "username":"nscahelper",
          "password":"103136174d231aabe1de8feaf9afc92f",
          "send_nsca": "localhost\\tDummy Service\\t2\\tlocalhost Mon Dec 23 22:03:50 UTC 2013",
          "cfg":"/etc/send_nsca.cfg"
        }

        /nsca/nagios/create/config
        {"oper": "login",
         "username": "nscahelper",
         "password": "103136174d231aabe1de8feaf9afc92f",
         "target": "nagios.cfg",
         "cfg": "remote2_nagios2",
         "partitions": "awk '{print $4}' /proc/partitions | sed -e '/name/d' -e '/^$/d' -e '/[1-9]/!d'",
         "host1": {
            "use": "generic-host",
            "host_name": "remote1",
            "alias": "remote1",
            "address": "0.0.0.0"
         },
         "command1": {
            "command_name": "dummy_command2",
            "command_line": "echo \"0\""
         },
         "service1": { "use":"generic-service",
                       "host_name":"remote1",
                       "service_description":"CPULoad",
                       "active_checks_enabled":"0",
                       "passive_checks_enabled":"1",
                       "check_command":"dummy_command2"
                       },
         "service2": { "use":"generic-service",
                       "host_name":"remote1",
                       "service_description":"CurrentUsers",
                       "active_checks_enabled":"0",
                       "passive_checks_enabled":"1",
                       "check_command":"dummy_command2"
                       },
         "service3": { "use":"generic-service",
                       "host_name":"remote1",
                       "service_description":"PING",
                       "active_checks_enabled":"0",
                       "passive_checks_enabled":"1",
                       "check_command":"dummy_command2"
                       },
         "service4": { "use":"generic-service",
                       "host_name":"remote1",
                       "service_description":"SSH",
                       "active_checks_enabled":"0",
                       "passive_checks_enabled":"1",
                       "check_command":"dummy_command2"
                       },
         "service5": { "use":"generic-service",
                       "host_name":"remote1",
                       "service_description":"TotalProcesses",
                       "active_checks_enabled":"0",
                       "passive_checks_enabled":"1",
                       "check_command":"dummy_command2"
                       },
         "service6": { "use":"generic-service",
                       "host_name":"remote1",
                       "service_description":"ZombieProcesses",
                       "active_checks_enabled":"0",
                       "passive_checks_enabled":"1",
                       "check_command":"dummy_command2"
                       }
        }
        '''
        logger.info('1. uri=%s' % (uri))
        web.header('Content-Type', 'application/json')
        logger.info('2. web.data()=%s' % (web.data()))
        d = {}
        status = ''
        try:
            payload = utils.SmartObject(simplejson.loads(web.data()))
        except Exception, ex:
            # Bad/missing JSON body: fall back to an empty payload and report.
            payload = utils.SmartObject()
            content = formattedException(details=ex)
            logger.exception(content)
            d['exception1'] = content
        try:
            nagios_update_config = 'nagios/update/config'
            nagios_create_config = 'nagios/create/config'
            if (uri in [nagios_update_config,nagios_create_config]):
                logger.info('3. payload.oper=%s' % (payload.oper))
                if (payload.oper == 'login'):
                    logger.info('4. payload.username=%s' % (payload.username))
                    logger.info('5. payload.password=%s' % (payload.password))
                    # Credentials are checked against the module-level constants.
                    if ( (payload.username == __username__) and (payload.password == __password__) ):
                        logger.info('6. payload.cfg=%s [%s]' % (payload.cfg,(payload.cfg is not None)))
                        if (payload.cfg is not None):
                            logger.info('7. utils.isUsingLinux=%s' % (utils.isUsingLinux))
                            nagios_cfg = str(payload.target) if (payload.target) else 'nagios.cfg'
                            if (utils.isUsingLinux):
                                if (nagios_cfg):
                                    if (payload.cfg):
                                        __cfg__ = None
                                        __nagios_cfg__ = None
                                        # Locate the main nagios config: first under /usr...
                                        for top,dirs,files in utils.walk('/usr'):
                                            #if (top.find('/usr/lib') > -1):
                                            #logger.info('8. top=%s' % (top))
                                            if (nagios_cfg in files):
                                                #logger.debug('9. top=%s' % (top))
                                                __nagios_cfg__ = os.sep.join([top,nagios_cfg])
                                                logger.debug('10. __nagios_cfg__=%s [%s]' % (__nagios_cfg__,os.path.exists(__nagios_cfg__)))
                                        # ...then under /etc (an /etc hit overrides the /usr one).
                                        for top,dirs,files in utils.walk('/etc'):
                                            #logger.info('11. top=%s' % (top))
                                            if (top.find('nagios') > -1):
                                                #logger.debug('12. top=%s' % (top))
                                                if (nagios_cfg in files):
                                                    logger.debug('13. top=%s' % (top))
                                                    __nagios_cfg__ = os.sep.join([top,nagios_cfg])
                                                    logger.debug('14. __nagios_cfg__=%s [%s]' % (__nagios_cfg__,os.path.exists(__nagios_cfg__)))
                                        if (__nagios_cfg__) and (os.path.exists(__nagios_cfg__)):
                                            logger.debug('20. __nagios_cfg__=%s [%s]' % (__nagios_cfg__,os.path.exists(__nagios_cfg__)))
                                            # Search next to the main config for '<cfg>.cfg'.
                                            for top,dirs,files in utils.walk(os.path.dirname(__nagios_cfg__)):
                                                logger.debug('21. top=%s' % (top))
                                                target_cfg = payload.cfg+'.cfg'
                                                for f in files:
                                                    #logger.debug('22 f (%s) == target (%s) [%s]' % (f,target_cfg,(f == target_cfg)))
                                                    if (f == target_cfg):
                                                        __cfg__ = os.sep.join([top,f])
                                                        break
                                                logger.debug('23. __cfg__=%s' % (__cfg__))
                                                if (uri in [nagios_create_config]) and (__cfg__ is None):
                                                    # On create, fall back to conf.d/<cfg>.cfg.
                                                    __cfgd__ = os.sep.join([os.path.dirname(__nagios_cfg__),'conf.d'])
                                                    if (os.path.exists(__cfgd__)):
                                                        __cfg__ = __cfgd__
                                                        __cfg__ = os.sep.join([__cfg__,target_cfg])
                                                        logger.debug('24. __cfg__=%s' % (__cfg__))
                                                logger.debug('25. __cfg__=%s [%s]' % (__cfg__,os.path.exists(__cfg__) if (__cfg__) else None))
                                                if (payload.partitions):
                                                    # Optional shell command whose output lists partition names.
                                                    logger.info('26. payload.partitions=%s' % (payload.partitions))
                                                    results = utils.shellexecute(payload.partitions)
                                                    logger.info('26.1 results=%s' % (results))
                                                    payload.partition_names = [str(r).strip() for r in results] if (utils.isList(results)) else results
                                                    logger.info('26.2 payload.partition_names=%s' % (payload.partition_names))
                                                if (__cfg__) and (os.path.exists(__cfg__)) and (uri in [nagios_update_config]):
                                                    logger.debug('27. handle_disk_services !!!')
                                                    status = utils.handle_disk_services(__cfg__, payload,logger)
                                                    d['status'] = status
                                                    logger.debug('28. status=%s' % (status))
                                                elif (__cfg__) and (uri in [nagios_create_config]):
                                                    logger.debug('29. handle_services !!!')
                                                    status = utils.handle_services(__cfg__, payload,logger)
                                                    d['status'] = status
                                                    logger.debug('30. status=%s' % (status))
                                                else:
                                                    logger.exception('WARNING: Cannot handle config file of "%s".' % (__cfg__))
                                                # Only the first walked directory level is processed.
                                                break
                                        else:
                                            logger.exception('WARNING: Cannot determine location of "%s".' % (nagios_cfg))
                                    else:
                                        logger.exception('WARNING: Cannot use or determine the valud of cfg which is "%s".' % (payload.cfg))
                                else:
                                    logger.exception('WARNING: Cannot use nagios.cfg reference of "%s".' % (nagios_cfg))
                            else:
                                logger.exception('WARNING: Cannot run this program in any OS other than Linux, sorry.')
            elif (uri == 'nagios/send/nsca'):
                logger.info('3. payload.oper=%s' % (payload.oper))
                if (payload.oper == 'login'):
                    logger.info('4. payload.username=%s' % (payload.username))
                    logger.info('5. payload.password=%s' % (payload.password))
                    if ( (payload.username == __username__) and (payload.password == __password__) ):
                        logger.info('6. payload.cfg=%s [%s]' % (payload.cfg,(payload.cfg is not None)))
                        if (payload.cfg is not None):
                            logger.info('7. utils.isUsingLinux=%s' % (utils.isUsingLinux))
                            send_nsca_cfg = str(payload.cfg)
                            if (utils.isUsingLinux):
                                if (send_nsca_cfg) and (os.path.exists(send_nsca_cfg)):
                                    logger.info('8. send_nsca_cfg=%s' % (send_nsca_cfg))
                                    # Resolve the send_nsca binary from PATH.
                                    results = utils.shellexecute('which send_nsca')
                                    logger.info('9. results=%s' % (results))
                                    __send_nsca__ = results[0].split('\n')[0] if (utils.isList(results)) else results.split('\n')[0]
                                    logger.info('10. __send_nsca__=%s' % (__send_nsca__))
                                    if (__send_nsca__) and (os.path.exists(__send_nsca__)):
                                        logger.info('11. payload.send_nsca=%s' % (payload.send_nsca))
                                        __cmd__ = 'printf "%%s\\n" "%s" | %s -H 127.0.0.1 -p 5667 -c %s' % (payload.send_nsca.replace('\\t','\t'),__send_nsca__,send_nsca_cfg)
                                        logger.info('12. __cmd__=%s' % (__cmd__))
                                        results = utils.shellexecute(__cmd__)
                                        if (utils.isList(results)):
                                            # NOTE(review): the join result is discarded;
                                            # probably meant results = ', '.join(results).
                                            ', '.join(results)
                                        logger.info('13. results=%s' % (results))
                                        d['status'] = results
                                    else:
                                        logger.exception('WARNING: Cannot determine location of send_nsca command from "%s".' % (__send_nsca__))
                                else:
                                    logger.exception('WARNING: Cannot determine location of "%s".' % (send_nsca_cfg))
        except Exception, ex:
            content = formattedException(details=ex)
            logger.exception(content)
            d['exception2'] = content
        return simplejson.dumps(d)
app = web.application(urls, globals())
app.notfound = notfound
if __name__ == '__main__':
    '''
    python nsca-helper-daemon.py
    '''
    import re
    # Matches an IPv4:port command-line argument (e.g. 1.2.3.4:8080).
    __re__ = re.compile(r"(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?):([0-9]{1,5})", re.MULTILINE)
    has_binding = any([__re__.match(arg) for arg in sys.argv])
    if (not has_binding):
        # Default binding when no address:port argument was supplied.
        sys.argv.append('0.0.0.0:15667')
    def __init__():
        # Thread target: announce startup and run the web.py server.
        logger.info('%s %s started !!!' % (__PROGNAME__,__version__))
        app.run()
    # Run the web server in a non-daemon thread so the process stays alive.
    t = threading.Thread(target=__init__)
    t.daemon = False
    t.start()
| |
import asyncio
import json
from asyncio import Future
from unittest import mock
import asynqp
import pytest
from asynqp_consumer import ConnectionParams, Consumer, Exchange, Queue, QueueBinding, Message
from asynqp_consumer.consumer import ConsumerCloseException
from tests.utils import future
def get_consumer(**kwargs):
    """Build a Consumer wired to the standard test queue and connection params."""
    binding = QueueBinding(
        exchange=Exchange('test_exchange'),
        routing_key='test_routing_key'
    )
    queue = Queue(name='test_queue', bindings=[binding])
    params = ConnectionParams(
        host='test_host',
        port=1234,
        username='test_username',
        password='test_password',
        virtual_host='test_virtual_host',
    )
    return Consumer(queue=queue, connection_params=[params], **kwargs)
async def simple_callback(messages):
    """No-op message handler used as a default consumer callback."""
    return None
@pytest.mark.asyncio
async def test_start__two_attempts(mocker, event_loop):
    """start() should retry after a failed connection, then run to completion."""
    # arrange
    consumer = get_consumer(callback=simple_callback)
    # First _connect attempt raises OSError; the second succeeds.
    mocker.patch.object(consumer, '_connect', side_effect=iter([OSError, future()]))
    mocker.patch.object(consumer, '_disconnect', return_value=future())
    mocker.patch.object(consumer, '_process_queue', return_value=future())
    mocker.patch.object(consumer, '_check_bulk', return_value=future())
    consumer._connection = mocker.Mock(spec=asynqp.Connection)
    consumer._connection.closed = asyncio.Future(loop=event_loop)
    mocker.patch('asynqp_consumer.consumer.gather', autospec=True, return_value=future())
    Future = mocker.patch('asynqp_consumer.consumer.asyncio.Future', autospec=True)
    # done() is polled: two iterations of the start loop, then stop.
    Future.return_value.done.side_effect = iter([False, False, True])
    Future.return_value._loop = event_loop
    # Closing the connection with ConsumerCloseException ends start() cleanly.
    consumer._connection.closed.set_exception(ConsumerCloseException)
    sleep = mocker.patch('asynqp_consumer.consumer.asyncio.sleep', return_value=future())
    # act
    await consumer.start(loop=event_loop)
    # assert
    assert consumer._connect.mock_calls == [
        mocker.call(loop=event_loop),
        mocker.call(loop=event_loop),
    ]
    consumer._disconnect.assert_called_once_with()
    consumer._process_queue.assert_called_once_with(loop=event_loop)
    consumer._check_bulk.assert_called_once_with(loop=event_loop)
    # Exactly one retry back-off of 3 seconds between the two attempts.
    sleep.assert_called_once_with(3, loop=event_loop)
@pytest.mark.asyncio
async def test__connect__ok(mocker, event_loop):
    """_connect should open a channel, declare the queue and store the handles."""
    # arrange
    connection = mocker.Mock(spec=asynqp.Connection)
    channel = mocker.Mock(spec=asynqp.Channel)
    channel.set_qos.return_value = future()
    connect_and_open_channel = mocker.patch('asynqp_consumer.consumer.connect_and_open_channel', autospec=True)
    connect_and_open_channel.return_value = future((connection, channel))
    asynqp_queue = mocker.Mock(spec=asynqp.Queue)
    declare_queue = mocker.patch('asynqp_consumer.consumer.declare_queue', autospec=True)
    declare_queue.return_value = future(asynqp_queue)
    consumer = get_consumer(callback=simple_callback)
    # act
    await consumer._connect(loop=event_loop)
    # assert
    # Connection is opened with the consumer's configured parameters...
    connect_and_open_channel.assert_called_once_with(ConnectionParams(
        host='test_host',
        port=1234,
        username='test_username',
        password='test_password',
        virtual_host='test_virtual_host',
    ), event_loop)
    # ...and the queue is declared with the configured bindings.
    declare_queue.assert_called_once_with(channel, Queue(
        name='test_queue',
        bindings=[
            QueueBinding(
                exchange=Exchange('test_exchange'),
                routing_key='test_routing_key'
            )
        ]
    ))
    # Default prefetch is unlimited (0).
    channel.set_qos.assert_called_once_with(prefetch_count=0)
    assert consumer._connection is connection
    assert consumer._channel is channel
    assert consumer._queue is asynqp_queue
@pytest.mark.asyncio
async def test__disconnect_ok(mocker):
    """_disconnect() closes both the channel and the connection."""
    # arrange
    consumer = get_consumer(callback=simple_callback)
    connection = mocker.patch.object(consumer, '_connection', autospec=True)
    connection.close.return_value = future()
    channel = mocker.patch.object(consumer, '_channel', autospec=True)
    channel.close.return_value = future()

    # act
    await consumer._disconnect()

    # assert
    consumer._connection.close.assert_called_once_with()
    consumer._channel.close.assert_called_once_with()
def test_close(mocker):
    """close() resolves the internal future with ConsumerCloseException."""
    # arrange
    consumer = get_consumer(callback=simple_callback)
    consumer._closed = asyncio.Future()
    connection = mocker.patch.object(consumer, '_connection', autospec=True)
    connection.closed = Future()

    # act
    consumer.close()

    # assert
    assert consumer._closed.done()
    assert isinstance(consumer._closed.exception(), ConsumerCloseException)
class AsyncIter:
    """Wrap a plain iterable so tests can consume it with ``async for``."""

    def __init__(self, iterable):
        self.iterable = iter(iterable)

    def __aiter__(self):
        return self

    async def __anext__(self):
        # Yield the next underlying item; translate exhaustion into the
        # async-iteration stop signal.
        for item in self.iterable:
            return item
        raise StopAsyncIteration
@pytest.mark.asyncio
async def test__process_queue__when_prefetch_count_is_0(mocker, event_loop):
    """With prefetch_count == 0 messages only accumulate in the batch.

    `_process_bulk` is never triggered per message here; flushing is
    presumably handled elsewhere (e.g. by `_check_bulk`) — see consumer impl.
    """
    # arrange
    consumer = get_consumer(callback=simple_callback, prefetch_count=0)
    mocker.patch.object(consumer, '_get_messages_iterator', return_value=future(AsyncIter([
        mock.Mock(spec=asynqp.IncomingMessage),
        mock.Mock(spec=asynqp.IncomingMessage),
    ])))
    mocker.patch.object(consumer, '_process_bulk', return_value=future())

    # act
    await consumer._process_queue(loop=event_loop)

    # assert
    assert len(consumer._messages) == 2
    assert isinstance(consumer._messages[0], Message)
    assert isinstance(consumer._messages[1], Message)
    assert not consumer._process_bulk.called
@pytest.mark.asyncio
async def test__process_queue__when_prefetch_count_is_not_0(mocker, event_loop):
    """With prefetch_count > 0 every received message triggers a bulk flush."""
    # arrange
    consumer = get_consumer(callback=simple_callback, prefetch_count=1)
    mocker.patch.object(consumer, '_get_messages_iterator', return_value=future(AsyncIter([
        mock.Mock(spec=asynqp.IncomingMessage),
        mock.Mock(spec=asynqp.IncomingMessage),
    ])))
    mocker.patch.object(consumer, '_process_bulk', side_effect=iter([future(), future()]))

    # act
    await consumer._process_queue(loop=event_loop)

    # assert
    assert len(consumer._messages) == 2
    assert isinstance(consumer._messages[0], Message)
    assert isinstance(consumer._messages[1], Message)
    # BUG FIX: this comparison previously lacked `assert`, so its result was
    # silently discarded and the call expectation was never checked.
    assert consumer._process_bulk.mock_calls == [mocker.call(), mocker.call()]
@pytest.mark.asyncio
async def test__process_queue__when_message_is_invalid_json(mocker, event_loop):
    """Messages whose body fails JSON parsing are not added to the batch."""
    # arrange
    consumer = get_consumer(callback=simple_callback, prefetch_count=1)
    message = mock.Mock(spec=asynqp.IncomingMessage)
    message.json.side_effect = json.JSONDecodeError('message', '', 0)
    message.body = 'Error json'
    mocker.patch.object(consumer, '_get_messages_iterator', return_value=future(AsyncIter([message])))

    # act
    await consumer._process_queue(loop=event_loop)

    # assert
    assert consumer._messages == []
@pytest.mark.asyncio
async def test__process_queue__ack_message_when_json_is_invalid(mocker, event_loop, capsys):
    """With reject_invalid_json=False an unparsable message is acked and logged,
    instead of being rejected/requeued."""
    # arrange
    consumer = get_consumer(callback=simple_callback, prefetch_count=1, reject_invalid_json=False)
    # Use a real IncomingMessage so body encoding matches production behaviour.
    message = asynqp.IncomingMessage(
        body="Invalid JSON",
        sender=None,
        delivery_tag=None,
        exchange_name=None,
        routing_key=None,
    )
    mocker.patch.object(message, 'ack', autospec=True)
    mocker.patch.object(consumer, '_get_messages_iterator', return_value=future(AsyncIter([message])))
    logger_exception = mocker.patch('asynqp_consumer.consumer.logger.exception')

    # act
    await consumer._process_queue(loop=event_loop)

    # assert
    assert consumer._messages == []
    message.ack.assert_called_once_with()
    # Body is logged as bytes (asynqp encodes the str body).
    logger_exception.assert_called_once_with('Failed to parse message body: %s', b'Invalid JSON')
class SomeException(Exception):
    """Marker exception used to break out of the message iterator in tests."""
    pass
@pytest.mark.asyncio
async def test__iter_messages(mocker, event_loop):
    """The iterator yields queued messages until an exception is pulled."""
    # arrange
    queue = mocker.Mock(spec=asynqp.Queue)
    queue.consume.return_value = future()
    consumer = get_consumer(callback=simple_callback, prefetch_count=0)
    mocker.patch.object(consumer, '_queue', new=queue)
    # NOTE: `queue` is rebound here, from the asynqp queue mock above to the
    # patched asyncio.Queue instance the iterator actually reads from.
    queue = mocker.patch('asynqp_consumer.consumer.asyncio.Queue').return_value
    queue.get.side_effect = iter([
        future(Message(mock.Mock(spec=asynqp.IncomingMessage))),
        future(Message(mock.Mock(spec=asynqp.IncomingMessage))),
        future(exception=SomeException),
    ])

    # act
    result = []
    with pytest.raises(SomeException):
        messages_iterator = await consumer._get_messages_iterator(loop=event_loop)
        async for message in messages_iterator:
            result.append(message)

    # assert
    # Two good messages were delivered before the exception surfaced.
    assert len(result) == 2
    assert isinstance(result[0], Message)
    assert isinstance(result[1], Message)
@pytest.mark.asyncio
async def test__consume_with_arguments(mocker, event_loop):
    """Custom consume_arguments are forwarded verbatim to queue.consume()."""
    # arrange
    queue = mocker.Mock(spec=asynqp.Queue)
    queue.consume.return_value = future()
    consume_arguments = {'x-priority': 100}
    consumer = get_consumer(callback=simple_callback, prefetch_count=0, consume_arguments=consume_arguments)
    mocker.patch.object(consumer, '_queue', new=queue)
    asyncio_queue = mocker.patch('asynqp_consumer.consumer.asyncio.Queue').return_value

    # act
    await consumer._get_messages_iterator(loop=event_loop)

    # assert
    queue.consume.assert_called_once_with(callback=asyncio_queue.put_nowait, arguments=consume_arguments)
| |
import os
import datetime
import tensorflow as tf
import numpy as np
from sentiment import SentimentAnalysisModel
from sentiment.w2v_model import Word2VecModel
class SentimentCNN(SentimentAnalysisModel):
    """Convolutional sentence-level sentiment classifier over pre-trained
    word2vec embeddings (Kim-2014 style).

    Graph: embedding lookup -> parallel conv+relu+max-pool branches (one per
    filter size) -> concat -> fully-connected layer -> dropout -> softmax.

    NOTE: written against the pre-1.0 TensorFlow API (tf.scalar_summary,
    tf.concat(dim, values), tf.initialize_all_variables, SummaryWriter).
    """

    # Checkpoint file name created inside ``model_save_path``.
    MODEL_FILE_NAME = 'sentiment_model.ckpt'
    # Set to True to write TensorBoard summaries to ``summary_path``.
    WRITE_SUMMARY = False

    def __init__(self,
                 session,
                 embeddings_model_path,
                 embeddings_vocab_path,
                 embeddings_size,
                 sentence_length,
                 n_labels,
                 filter_sizes=(3,),
                 n_filters=1,
                 filter_stride=(1, 1, 1, 1),
                 dropout_keep_prob=0.5,
                 l2_lambda=0.0,
                 learning_rate=0.05,
                 batch_size=10,
                 n_steps=10000,
                 validation_check_steps=500,
                 summary_path='/tmp/tensorboard',
                 model_save_path=None):
        """Load the embeddings and build the TensorFlow graph.

        Args:
            session: tf.Session used for all graph operations.
            embeddings_model_path / embeddings_vocab_path / embeddings_size:
                location and dimensionality of the pre-trained word2vec model.
            sentence_length: fixed token count sentences are padded/cut to.
            n_labels: number of output classes.
            filter_sizes: convolution window heights in tokens, one branch each.
            n_filters: number of filters per window size.
            filter_stride: 4-D stride passed to tf.nn.conv2d.
            dropout_keep_prob: keep probability applied during training.
            l2_lambda: L2 regularization weight (0.0 disables it).
            learning_rate, batch_size, n_steps: optimization parameters.
            validation_check_steps: run validation every this many steps.
            summary_path: TensorBoard log directory (used when WRITE_SUMMARY).
            model_save_path: checkpoint directory; None disables saving.
        """
        super().__init__()
        self.session = session
        self.sentence_length = sentence_length
        self.n_labels = n_labels
        self.filter_sizes = filter_sizes
        self.n_filters = n_filters
        self.filter_stride = filter_stride
        self.dropout_keep_prob_value = dropout_keep_prob
        self.l2_lambda = l2_lambda
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.n_steps = n_steps
        self.check_steps = validation_check_steps
        self.summary_path = summary_path
        self.model_save_path = model_save_path

        self._word2vec = Word2VecModel(sess=session)
        self.load_embeddings(embeddings_model_path, embeddings_vocab_path, embeddings_size)
        self.embeddings_shape = self._word2vec.get_embeddings_shape()

        # Graph handles, populated by build_graph().
        self._x = None
        self._y = None
        self._dropout_keep_prob = None
        self._logits = None
        self._loss = None
        self._prediction = None
        self._accuracy = None
        self._optimizer = None
        self._w = []
        self._b = []
        self.saver = None
        self.build_graph()

    def load_embeddings(self, model_path, vocab_path, embeddings_size):
        """Load the pre-trained word2vec embeddings into the session."""
        self._word2vec.load_model(model_path, vocab_path, embeddings_size)

    def create_model(self, data):
        """Build the conv/pool/fc stack and return the pre-softmax logits.

        Args:
            data: int32 tensor of word ids, shape (batch, sentence_length).
        """
        embed = tf.nn.embedding_lookup(self._word2vec.w_in, data, name='embedding')
        # conv2d needs a channel dimension: (batch, length, emb_size, 1).
        embed = tf.expand_dims(embed, -1)

        filters = []
        for filter_id, filter_size in enumerate(self.filter_sizes):
            with tf.name_scope('conv-maxpool-{}-{}'.format(filter_id, filter_size)):
                weights = tf.Variable(
                    tf.truncated_normal([filter_size, self.embeddings_shape[1], 1, self.n_filters], stddev=0.1),
                    name='w'
                )
                bias = tf.Variable(tf.zeros([self.n_filters]), name='b')
                self._w.append(weights)
                self._b.append(bias)
                conv = tf.nn.conv2d(embed, weights, list(self.filter_stride), padding='VALID', name='conv')
                relu = tf.nn.relu(tf.nn.bias_add(conv, bias), name='relu')
                # Max-pool over the whole (valid) output length -> one value
                # per filter.
                pool = tf.nn.max_pool(relu,
                                      [1, self.sentence_length - filter_size + 1, 1, 1],
                                      [1, 1, 1, 1],
                                      padding='VALID',
                                      name='pool')
                filters.append(pool)

        # Old-style tf.concat signature: (dim, values).
        concat = tf.concat(3, filters)
        h = tf.reshape(concat, [-1, len(filters) * self.n_filters])
        h_shape = h.get_shape().as_list()

        with tf.name_scope("fc"):
            fc_weights = tf.Variable(tf.truncated_normal([h_shape[1], self.n_labels], stddev=0.1), name="w")
            fc_biases = tf.Variable(tf.constant(0.0, shape=[self.n_labels]), name="b")
            h = tf.matmul(h, fc_weights) + fc_biases
            self._w.append(fc_weights)
            self._b.append(fc_biases)

        # NOTE(review): dropout is applied to the logits, after the FC layer;
        # conventionally it is applied to the pooled features before the FC
        # layer — confirm this is intentional.
        with tf.name_scope("dropout"):
            h = tf.nn.dropout(h, self._dropout_keep_prob)
        return h

    def loss(self, logits, labels, weights=None, biases=None):
        """Softmax cross-entropy loss with optional L2 regularization.

        NOTE(review): build_graph() never passes weights/biases, so the
        tf.map_fn-over-a-list regularization path is currently unused.
        """
        with tf.name_scope("xent"):
            loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, labels))
            l2_reg = 0.0
            if weights:
                l2_reg += sum(tf.map_fn(tf.nn.l2_loss, weights))
            if biases:
                l2_reg += sum(tf.map_fn(tf.nn.l2_loss, biases))
            loss += self.l2_lambda * l2_reg
        return loss

    def optimze(self, loss):
        """Return an Adam minimization op for ``loss``.

        (Name misspelling kept for backward compatibility with callers.)
        """
        optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(loss)
        return optimizer

    def build_graph(self):
        """Create placeholders, model, loss, optimizer and (optionally) saver."""
        self._x = tf.placeholder(tf.int32, shape=(None, self.sentence_length), name='x')
        self._y = tf.placeholder(tf.float32, shape=(None, self.n_labels), name='y')
        self._dropout_keep_prob = tf.placeholder(dtype=tf.float32, name='dropout_prob')
        self._logits = self.create_model(self._x)
        self._prediction = tf.nn.softmax(self._logits, name='prediction')
        self._accuracy = self.tf_accuracy(self._prediction, self._y)
        self._loss = self.loss(self._logits, self._y)
        self._optimizer = self.optimze(self._loss)

        if self.model_save_path is None:
            print('WARNING: model_save_path is not specified, model won\'t be saved!')
        else:
            self.saver = tf.train.Saver()

    def train(self,
              train_dataset, train_labels,
              valid_dataset=None, valid_labels=None,
              test_dataset=None, test_labels=None):
        """Run minibatch SGD for ``n_steps`` and return the final (loss, accuracy).

        Datasets are token sequences; labels are one-hot arrays. Validation is
        evaluated every ``check_steps`` steps when a validation set is given.
        """
        train_dataset, train_labels = self.prepare_dataset(train_dataset, train_labels)
        valid_dataset, valid_labels = self.prepare_dataset(valid_dataset, valid_labels)
        test_dataset, test_labels = self.prepare_dataset(test_dataset, test_labels)

        has_validation_set = valid_dataset is not None and valid_labels is not None
        has_test_set = test_dataset is not None and test_labels is not None

        print('Train dataset: size = {}; shape = {}'.format(len(train_dataset), train_dataset.shape))
        if has_validation_set:
            print('Valid dataset: size = {}; shape = {}'.format(len(valid_dataset), valid_dataset.shape))
        if has_test_set:
            print('Test dataset: size = {}; shape = {}'.format(len(test_dataset), test_dataset.shape))

        tf_train_loss_summary = tf.scalar_summary("train_loss", self._loss)
        tf_valid_loss_summary = tf.scalar_summary("valid_loss", self._loss)
        tf_train_accuracy_summary = tf.scalar_summary('train_accuracy', self._accuracy)
        tf_valid_accuracy_summary = tf.scalar_summary('valid_accuracy', self._accuracy)

        writer = None
        if self.WRITE_SUMMARY:
            writer = tf.train.SummaryWriter(self.summary_path, self.session.graph)

        tf.initialize_all_variables().run(session=self.session)

        loss, accuracy = 0, 0
        for step in range(self.n_steps + 1):
            # Sliding minibatch window over the (unshuffled) training set.
            offset = (step * self.batch_size) % (train_labels.shape[0] - self.batch_size)
            batch_data = train_dataset[offset:(offset + self.batch_size), :]
            batch_labels = train_labels[offset:(offset + self.batch_size), :]
            feed_dict = {
                self._x: batch_data,
                self._y: batch_labels,
                self._dropout_keep_prob: self.dropout_keep_prob_value
            }
            _, loss, accuracy, loss_summary, accuracy_summary = self.session.run(
                [self._optimizer, self._loss, self._accuracy, tf_train_loss_summary, tf_train_accuracy_summary],
                feed_dict=feed_dict
            )
            if writer:
                writer.add_summary(loss_summary, step)
                writer.add_summary(accuracy_summary, step)
            print("{}: step {}, loss {:g}, accuracy {:g}".format(datetime.datetime.now().isoformat(),
                                                                 step, loss, accuracy))

            if step % self.check_steps == 0:
                # BUG FIX: this previously tested `has_validation_set is not
                # None`, which is always True for a bool, so the validation
                # block ran (and crashed on a None dataset) even without a
                # validation set.
                if has_validation_set:
                    feed_dict = {
                        self._x: valid_dataset,
                        self._y: valid_labels,
                        self._dropout_keep_prob: 1.0  # no dropout at eval time
                    }
                    loss, accuracy, loss_summary, accuracy_summary = self.session.run(
                        [self._loss, self._accuracy, tf_valid_loss_summary, tf_valid_accuracy_summary],
                        feed_dict=feed_dict
                    )
                    if writer:
                        writer.add_summary(loss_summary, step)
                        writer.add_summary(accuracy_summary, step)
                    print()
                    print("VALIDATION: {}: step {}, loss {:g}, accuracy {:g}".format(datetime.datetime.now().isoformat(),
                                                                                     step, loss, accuracy))
                    print()
                if self.model_save_path and self.saver:
                    self.save()

        return loss, accuracy

    def save(self):
        """Persist a checkpoint to ``model_save_path`` and return its path."""
        if self.model_save_path and self.saver:
            save_path = self.saver.save(self.session, os.path.join(self.model_save_path, self.MODEL_FILE_NAME))
            print('Model saved in file: {}'.format(save_path))
            return save_path
        else:
            raise Exception('Can\'t save: model_save_path is None')

    def restore(self):
        """Restore the latest checkpoint from ``model_save_path``."""
        if self.model_save_path and self.saver:
            full_path = os.path.join(self.model_save_path, self.MODEL_FILE_NAME)
            self.saver.restore(self.session, full_path)
            print('Model restored from file: {}'.format(full_path))
        else:
            raise Exception('Can\'t restore: model_save_path is None')

    def predict(self, words):
        """Return the softmax class distribution for a single token sequence."""
        words, _ = self.prepare_dataset(np.asarray([words]))
        feed_dict = {
            self._x: words,
            self._dropout_keep_prob: 1.0  # no dropout at inference time
        }
        prediction = self.session.run(
            [self._prediction],
            feed_dict=feed_dict
        )
        return prediction[0][0]

    @staticmethod
    def tf_accuracy(predictions, labels, tf_accuracy_name='accuracy'):
        """Graph op computing mean argmax-match accuracy.

        BUG FIX: the ``tf_accuracy_name`` parameter was previously ignored
        (the scope was hard-coded to 'accuracy'); it is now honoured, with
        the same default.
        """
        with tf.name_scope(tf_accuracy_name):
            correct_prediction = tf.equal(tf.argmax(predictions, 1), tf.argmax(labels, 1))
            acc = tf.reduce_mean(tf.cast(correct_prediction, "float"))
            return acc

    @staticmethod
    def accuracy(predictions, labels):
        """Percentage of rows where the predicted argmax matches the label."""
        return 100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0]

    def prepare_dataset(self, dataset, labels=None):
        """Map token sequences to fixed-length id arrays, dropping sentences
        with no known words.

        Returns (dataset, labels) trimmed to the sentences that survived;
        labels is None when no labels were supplied.
        """
        if dataset is None and labels is None:
            return None, None
        assert labels is None or dataset.shape[0] == labels.shape[0]

        processed_dataset = np.ndarray((len(dataset), self.sentence_length), dtype=np.int32)
        if labels is not None:
            processed_labels = np.ndarray(labels.shape, dtype=np.int32)
        else:
            processed_labels = None

        real_dataset_length = 0
        empty_sents = 0
        for i, source_words in enumerate(dataset):
            words = self._word2vec.word2id_many(source_words)
            if words:
                # Pad short sentences with zeros, truncate long ones.
                if len(words) < self.sentence_length:
                    sentence_padding = self.sentence_length - len(words)
                    words = np.pad(words, (0, sentence_padding), mode='constant')
                elif len(words) > self.sentence_length:
                    words = words[:self.sentence_length]
                processed_dataset[real_dataset_length, :] = words
                if processed_labels is not None:
                    processed_labels[real_dataset_length] = labels[i]
                real_dataset_length += 1
            elif len(words) == 0:
                empty_sents += 1

        # Trim the pre-allocated buffers to the sentences actually kept.
        processed_dataset = processed_dataset[:real_dataset_length, :]
        if processed_labels is not None:
            # (the labels were previously sliced twice; once is enough)
            processed_labels = processed_labels[:real_dataset_length]
            return processed_dataset, processed_labels
        return processed_dataset, None

    def close(self):
        """Release the underlying TensorFlow session."""
        self.session.close()
| |
"""Constants and functions defined by the CSS specification, not specific to
Sass.
"""
from fractions import Fraction
from math import pi
import re
import six
# ------------------------------------------------------------------------------
# Built-in CSS color names
# See: http://www.w3.org/TR/css3-color/#svg-color
# Each value is an (r, g, b, alpha) tuple; alpha is 1 except for 'transparent'.
COLOR_NAMES = {
    'aliceblue': (240, 248, 255, 1),
    'antiquewhite': (250, 235, 215, 1),
    'aqua': (0, 255, 255, 1),
    'aquamarine': (127, 255, 212, 1),
    'azure': (240, 255, 255, 1),
    'beige': (245, 245, 220, 1),
    'bisque': (255, 228, 196, 1),
    'black': (0, 0, 0, 1),
    'blanchedalmond': (255, 235, 205, 1),
    'blue': (0, 0, 255, 1),
    'blueviolet': (138, 43, 226, 1),
    'brown': (165, 42, 42, 1),
    'burlywood': (222, 184, 135, 1),
    'cadetblue': (95, 158, 160, 1),
    'chartreuse': (127, 255, 0, 1),
    'chocolate': (210, 105, 30, 1),
    'coral': (255, 127, 80, 1),
    'cornflowerblue': (100, 149, 237, 1),
    'cornsilk': (255, 248, 220, 1),
    'crimson': (220, 20, 60, 1),
    'cyan': (0, 255, 255, 1),
    'darkblue': (0, 0, 139, 1),
    'darkcyan': (0, 139, 139, 1),
    'darkgoldenrod': (184, 134, 11, 1),
    'darkgray': (169, 169, 169, 1),
    'darkgreen': (0, 100, 0, 1),
    'darkkhaki': (189, 183, 107, 1),
    'darkmagenta': (139, 0, 139, 1),
    'darkolivegreen': (85, 107, 47, 1),
    'darkorange': (255, 140, 0, 1),
    'darkorchid': (153, 50, 204, 1),
    'darkred': (139, 0, 0, 1),
    'darksalmon': (233, 150, 122, 1),
    'darkseagreen': (143, 188, 143, 1),
    'darkslateblue': (72, 61, 139, 1),
    'darkslategray': (47, 79, 79, 1),
    'darkturquoise': (0, 206, 209, 1),
    'darkviolet': (148, 0, 211, 1),
    'deeppink': (255, 20, 147, 1),
    'deepskyblue': (0, 191, 255, 1),
    'dimgray': (105, 105, 105, 1),
    'dodgerblue': (30, 144, 255, 1),
    'firebrick': (178, 34, 34, 1),
    'floralwhite': (255, 250, 240, 1),
    'forestgreen': (34, 139, 34, 1),
    'fuchsia': (255, 0, 255, 1),
    'gainsboro': (220, 220, 220, 1),
    'ghostwhite': (248, 248, 255, 1),
    'gold': (255, 215, 0, 1),
    'goldenrod': (218, 165, 32, 1),
    'gray': (128, 128, 128, 1),
    'green': (0, 128, 0, 1),
    'greenyellow': (173, 255, 47, 1),
    'honeydew': (240, 255, 240, 1),
    'hotpink': (255, 105, 180, 1),
    'indianred': (205, 92, 92, 1),
    'indigo': (75, 0, 130, 1),
    'ivory': (255, 255, 240, 1),
    'khaki': (240, 230, 140, 1),
    'lavender': (230, 230, 250, 1),
    'lavenderblush': (255, 240, 245, 1),
    'lawngreen': (124, 252, 0, 1),
    'lemonchiffon': (255, 250, 205, 1),
    'lightblue': (173, 216, 230, 1),
    'lightcoral': (240, 128, 128, 1),
    'lightcyan': (224, 255, 255, 1),
    'lightgoldenrodyellow': (250, 250, 210, 1),
    'lightgreen': (144, 238, 144, 1),
    'lightgrey': (211, 211, 211, 1),
    'lightpink': (255, 182, 193, 1),
    'lightsalmon': (255, 160, 122, 1),
    'lightseagreen': (32, 178, 170, 1),
    'lightskyblue': (135, 206, 250, 1),
    'lightslategray': (119, 136, 153, 1),
    'lightsteelblue': (176, 196, 222, 1),
    'lightyellow': (255, 255, 224, 1),
    'lime': (0, 255, 0, 1),
    'limegreen': (50, 205, 50, 1),
    'linen': (250, 240, 230, 1),
    'magenta': (255, 0, 255, 1),
    'maroon': (128, 0, 0, 1),
    'mediumaquamarine': (102, 205, 170, 1),
    'mediumblue': (0, 0, 205, 1),
    'mediumorchid': (186, 85, 211, 1),
    'mediumpurple': (147, 112, 219, 1),
    'mediumseagreen': (60, 179, 113, 1),
    'mediumslateblue': (123, 104, 238, 1),
    'mediumspringgreen': (0, 250, 154, 1),
    'mediumturquoise': (72, 209, 204, 1),
    'mediumvioletred': (199, 21, 133, 1),
    'midnightblue': (25, 25, 112, 1),
    'mintcream': (245, 255, 250, 1),
    'mistyrose': (255, 228, 225, 1),
    'moccasin': (255, 228, 181, 1),
    'navajowhite': (255, 222, 173, 1),
    'navy': (0, 0, 128, 1),
    'oldlace': (253, 245, 230, 1),
    'olive': (128, 128, 0, 1),
    'olivedrab': (107, 142, 35, 1),
    'orange': (255, 165, 0, 1),
    'orangered': (255, 69, 0, 1),
    'orchid': (218, 112, 214, 1),
    'palegoldenrod': (238, 232, 170, 1),
    'palegreen': (152, 251, 152, 1),
    'paleturquoise': (175, 238, 238, 1),
    'palevioletred': (219, 112, 147, 1),
    'papayawhip': (255, 239, 213, 1),
    'peachpuff': (255, 218, 185, 1),
    'peru': (205, 133, 63, 1),
    'pink': (255, 192, 203, 1),
    'plum': (221, 160, 221, 1),
    'powderblue': (176, 224, 230, 1),
    'purple': (128, 0, 128, 1),
    'rebeccapurple': (0x66, 0x33, 0x99, 1),
    'red': (255, 0, 0, 1),
    'rosybrown': (188, 143, 143, 1),
    'royalblue': (65, 105, 225, 1),
    'saddlebrown': (139, 69, 19, 1),
    'salmon': (250, 128, 114, 1),
    'sandybrown': (244, 164, 96, 1),
    'seagreen': (46, 139, 87, 1),
    'seashell': (255, 245, 238, 1),
    'sienna': (160, 82, 45, 1),
    'silver': (192, 192, 192, 1),
    'skyblue': (135, 206, 235, 1),
    'slateblue': (106, 90, 205, 1),
    'slategray': (112, 128, 144, 1),
    'snow': (255, 250, 250, 1),
    'springgreen': (0, 255, 127, 1),
    'steelblue': (70, 130, 180, 1),
    'tan': (210, 180, 140, 1),
    'teal': (0, 128, 128, 1),
    'thistle': (216, 191, 216, 1),
    'tomato': (255, 99, 71, 1),
    'transparent': (0, 0, 0, 0),
    'turquoise': (64, 224, 208, 1),
    'violet': (238, 130, 238, 1),
    'wheat': (245, 222, 179, 1),
    'white': (255, 255, 255, 1),
    'whitesmoke': (245, 245, 245, 1),
    'yellow': (255, 255, 0, 1),
    'yellowgreen': (154, 205, 50, 1),
}
# Reverse map from rgba tuple back to a name.  NOTE: several names share one
# tuple (e.g. aqua/cyan, fuchsia/magenta); the name appearing later in
# COLOR_NAMES wins (dict insertion order on Python 3.7+).
COLOR_LOOKUP = dict((v, k) for (k, v) in COLOR_NAMES.items())
# ------------------------------------------------------------------------------
# Built-in CSS units
# See: http://www.w3.org/TR/2013/CR-css3-values-20130730/#numeric-types
# Maps units to a set of common units per type, with conversion factors
# Maps each unit to (factor, base_unit): multiplying a quantity by `factor`
# expresses it in `base_unit` for its dimension (length->mm, angle->turn,
# time->ms, frequency->hz, resolution->dpi).  Factors are exact Fractions
# where possible to keep unit arithmetic lossless.
BASE_UNIT_CONVERSIONS = {
    # Lengths
    'mm': (1, 'mm'),
    'cm': (10, 'mm'),
    'in': (Fraction(254, 10), 'mm'),
    'px': (Fraction(254, 960), 'mm'),
    'pt': (Fraction(254, 720), 'mm'),
    'pc': (Fraction(254, 60), 'mm'),

    # Angles
    'deg': (Fraction(1, 360), 'turn'),
    'grad': (Fraction(1, 400), 'turn'),
    # BUG FIX: one radian is 1/(2*pi) of a turn; this previously used pi/2,
    # which is off by a factor of pi**2.
    'rad': (Fraction(1, 2) / Fraction.from_float(pi), 'turn'),
    'turn': (1, 'turn'),

    # Times
    'ms': (1, 'ms'),
    's': (1000, 'ms'),

    # Frequencies
    'hz': (1, 'hz'),
    'khz': (1000, 'hz'),

    # Resolutions
    'dpi': (1, 'dpi'),
    # BUG FIX: was Fraction(254 / 100) — a Fraction built from the float 2.54
    # (inexact, and 2 under Python 2 integer division).  Use the exact ratio.
    'dpcm': (Fraction(254, 100), 'dpi'),
    'dppx': (96, 'dpi'),
}
def get_conversion_factor(unit):
    """Look up the "base" unit for this unit and the factor for converting to
    it.

    Returns a 2-tuple of `factor, base_unit`.  Unknown units convert to
    themselves with a factor of 1.
    """
    try:
        return BASE_UNIT_CONVERSIONS[unit]
    except KeyError:
        return 1, unit
def convert_units_to_base_units(units):
    """Convert a set of units into a set of "base" units.

    Returns a 2-tuple of `factor, new_units`.  Units with no known base
    conversion are dropped entirely (matching the original behaviour).
    """
    total_factor = 1
    converted = []
    for unit in units:
        try:
            factor, base = BASE_UNIT_CONVERSIONS[unit]
        except KeyError:
            continue
        total_factor *= factor
        converted.append(base)
    return total_factor, tuple(sorted(converted))
def count_base_units(units):
    """Returns a dict mapping names of base units to how many times they
    appear in the given iterable of units.  Effectively this counts how
    many length units you have, how many time units, and so forth.
    """
    counts = {}
    for unit in units:
        _, base_unit = get_conversion_factor(unit)
        counts[base_unit] = counts.get(base_unit, 0) + 1
    return counts
def cancel_base_units(units, to_remove):
    """Given a list of units, remove a specified number of each base unit.

    Arguments:
        units: an iterable of units
        to_remove: a mapping of base_unit => count, such as that returned from
            count_base_units

    Returns a 2-tuple of (factor, remaining_units).
    """
    # Work on a copy so the caller's mapping is not mutated.
    remaining_budget = dict(to_remove)
    kept_units = []
    total_factor = Fraction(1)

    for unit in units:
        unit_factor, base_unit = get_conversion_factor(unit)
        if remaining_budget.get(base_unit, 0):
            # Cancel this unit: fold its factor in and spend one removal.
            total_factor *= unit_factor
            remaining_budget[base_unit] -= 1
        else:
            kept_units.append(unit)

    return total_factor, kept_units
# A fixed set of units can be omitted when the value is 0
# See: http://www.w3.org/TR/2013/CR-css3-values-20130730/#lengths
# (all are length units; angle/time/etc. units must be kept even when zero)
ZEROABLE_UNITS = frozenset((
    # Relative lengths
    'em', 'ex', 'ch', 'rem',
    # Viewport
    'vw', 'vh', 'vmin', 'vmax',
    # Absolute lengths
    'cm', 'mm', 'in', 'px', 'pt', 'pc',
))
# ------------------------------------------------------------------------------
# Built-in CSS function reference
# Known function names; used by is_builtin_css_function() to decide whether an
# unrecognized call should produce a warning.
BUILTIN_FUNCTIONS = frozenset([
    # CSS2
    'attr', 'counter', 'counters', 'url', 'rgb', 'rect',

    # CSS3 values: http://www.w3.org/TR/css3-values/
    'calc', 'min', 'max', 'cycle',

    # CSS3 colors: http://www.w3.org/TR/css3-color/
    'rgba', 'hsl', 'hsla',

    # CSS3 fonts: http://www.w3.org/TR/css3-fonts/
    'local', 'format',

    # CSS3 images: http://www.w3.org/TR/css3-images/
    'image', 'element',
    'linear-gradient', 'radial-gradient',
    'repeating-linear-gradient', 'repeating-radial-gradient',

    # CSS3 transforms: http://www.w3.org/TR/css3-transforms/
    'perspective',
    'matrix', 'matrix3d',
    'rotate', 'rotateX', 'rotateY', 'rotateZ', 'rotate3d',
    'translate', 'translateX', 'translateY', 'translateZ', 'translate3d',
    'scale', 'scaleX', 'scaleY', 'scaleZ', 'scale3d',
    'skew', 'skewX', 'skewY',

    # CSS3 transitions: http://www.w3.org/TR/css3-transitions/
    'cubic-bezier', 'steps',

    # CSS filter effects:
    # https://dvcs.w3.org/hg/FXTF/raw-file/tip/filters/index.html
    'grayscale', 'sepia', 'saturate', 'hue-rotate', 'invert', 'opacity',
    'brightness', 'contrast', 'blur', 'drop-shadow', 'custom',

    # CSS shapes
    # https://www.w3.org/TR/css-shapes-1/
    'inset', 'circle', 'ellipse', 'polygon',

    # CSS4 image module:
    # http://dev.w3.org/csswg/css-images/
    'image-set', 'cross-fade',
    'conic-gradient', 'repeating-conic-gradient',

    # Others
    'color-stop',  # Older version of CSS3 gradients
    'mask',  # ???
    'from', 'to',  # Very old WebKit gradient syntax
])
def is_builtin_css_function(name):
    """Returns whether the given `name` looks like the name of a builtin CSS
    function.

    Unrecognized functions not in this list produce warnings.
    """
    # CSS uses hyphens where Sass allows underscores; normalize first.
    normalized = name.replace('_', '-')

    if normalized in BUILTIN_FUNCTIONS:
        return True

    # Vendor-specific functions (-foo-bar) are always okay
    return normalized[0] == '-' and '-' in normalized[1:]
# ------------------------------------------------------------------------------
# CSS character set determination
# Based upon: http://www.w3.org/TR/CSS2/syndata.html#charset
def determine_encoding(buf):
    """Return the appropriate encoding for the given CSS source, according to
    the CSS charset rules.

    `buf` may be either a string or bytes.  For bytes input, the BOM (if any)
    gives a tentative encoding which an `@charset` rule may refine; the two
    must agree or a UnicodeError is raised.
    """
    # The ultimate default is utf8; bravo, W3C
    bom_encoding = 'UTF-8'

    if not buf:
        # What
        return bom_encoding

    if isinstance(buf, six.text_type):
        # We got a file that, for whatever reason, produces already-decoded
        # text.  Check for the BOM (which is useless now) and believe
        # whatever's in the @charset.
        if buf[0] == '\ufeff':
            # BUG FIX: actually strip the BOM character; `buf[0:]` was a
            # no-op, so a following @charset rule was never detected.
            buf = buf[1:]

        # This is pretty similar to the code below, but without any encoding
        # double-checking.
        charset_start = '@charset "'
        charset_end = '";'
        if buf.startswith(charset_start):
            start = len(charset_start)
            end = buf.index(charset_end, start)
            return buf[start:end]
        else:
            return bom_encoding

    # BOMs
    if buf[:3] == b'\xef\xbb\xbf':
        bom_encoding = 'UTF-8'
        buf = buf[3:]
    if buf[:4] == b'\x00\x00\xfe\xff':
        bom_encoding = 'UTF-32BE'
        buf = buf[4:]
    elif buf[:4] == b'\xff\xfe\x00\x00':
        bom_encoding = 'UTF-32LE'
        buf = buf[4:]
    if buf[:4] == b'\x00\x00\xff\xfe':
        raise UnicodeError("UTF-32-2143 is not supported")
    elif buf[:4] == b'\xfe\xff\x00\x00':
        raise UnicodeError("UTF-32-2143 is not supported")
    elif buf[:2] == b'\xfe\xff':
        bom_encoding = 'UTF-16BE'
        buf = buf[2:]
    elif buf[:2] == b'\xff\xfe':
        bom_encoding = 'UTF-16LE'
        buf = buf[2:]

    # The spec requires exactly this syntax; no escapes or extra spaces or
    # other shenanigans, thank goodness.
    charset_start = '@charset "'.encode(bom_encoding)
    charset_end = '";'.encode(bom_encoding)
    if buf.startswith(charset_start):
        start = len(charset_start)
        end = buf.index(charset_end, start)
        encoded_encoding = buf[start:end]
        encoding = encoded_encoding.decode(bom_encoding)

        # Ensure that decoding with the specified encoding actually produces
        # the same @charset rule
        encoded_charset = buf[:end + len(charset_end)]
        if (encoded_charset.decode(encoding) !=
                encoded_charset.decode(bom_encoding)):
            # BUG FIX: the format arguments were swapped relative to the
            # message's placeholders ({0} is the @charset value, {1} the
            # BOM-detected encoding).
            raise UnicodeError(
                "@charset {0} is incompatible with detected encoding {1}"
                .format(encoding, bom_encoding))
    else:
        # With no @charset, believe the BOM
        encoding = bom_encoding

    return encoding
# ------------------------------------------------------------------------------
# Bits and pieces of the official CSS grammar

# These are the only pseudo-elements allowed to be specified with a single
# colon, for backwards compatibility
CSS2_PSEUDO_ELEMENTS = frozenset((
    ':after',
    ':before',
    ':first-line',
    ':first-letter',
))

# CSS escape sequences are either a backslash followed by a single character,
# or a backslash followed by one to six hex digits and a single optional
# whitespace.  Escaped newlines become nothing.
# Ref: http://dev.w3.org/csswg/css-syntax-3/#consume-an-escaped-code-point
# Group 1 captures the hex form, group 2 the single-character form; an
# escaped newline matches with neither group set.
escape_rx = re.compile(r"(?s)\\([0-9a-fA-F]{1,6})[\n\t ]?|\\(.)|\\\n")
def _unescape_one(match):
    """re.sub callback for `escape_rx`: turn one escape sequence into its
    literal character(s)."""
    if match.group(1) is not None:
        # Hex escape: \XXXXXX -> the corresponding code point.
        try:
            return six.unichr(int(match.group(1), 16))
        except ValueError:
            # NOTE(review): this fallback (str.decode on a \U literal) only
            # works on Python 2 narrow builds, where unichr() cannot produce
            # non-BMP code points; on Python 3, str has no .decode.
            return (r'\U%08x' % int(match.group(1), 16)).decode(
                'unicode-escape')
    elif match.group(2) is not None:
        # Simple escape: \c -> c.
        return match.group(2)
    else:
        # Escaped newline: vanishes entirely.
        return six.text_type()
def unescape(string):
    """Given a raw CSS string (i.e. taken directly from CSS source with no
    processing), eliminate all backslash escapes.
    """
    # re.sub accepts a compiled pattern; _unescape_one expands each escape.
    return re.sub(escape_rx, _unescape_one, string)
# ------------------------------------------------------------------------------
# Ad-hoc regexes specific to pyscss
_expr_glob_re = re.compile(r'''
\#\{(.*?)\} # Global Interpolation only
''', re.VERBOSE)
# XXX these still need to be fixed; the //-in-functions thing is a chumpy hack
_ml_comment_re = re.compile(r'\/\*(.*?)\*\/', re.DOTALL)
_sl_comment_re = re.compile(r'(?<!\burl[(])(?<!\w{2}:)\/\/.*')
_escape_chars_re = re.compile(r'([^-a-zA-Z0-9_])')
_interpolate_re = re.compile(r'(#\{\s*)?(\$[-\w]+)(?(1)\s*\})')
_spaces_re = re.compile(r'\s+')
_collapse_properties_space_re = re.compile(r'([:#])\s*{')
_variable_re = re.compile('^\\$[-a-zA-Z0-9_]+$')
_strings_re = re.compile(r'([\'"]).*?\1')
# TODO i know, this is clumsy and won't always work; it's better than nothing
_urls_re = re.compile(r'url[(].*?[)]')
_has_placeholder_re = re.compile(r'(?<!\w)([a-z]\w*)?%')
_prop_split_re = re.compile(r'[:=]')
_has_code_re = re.compile('''
(?:^|(?<=[{;}])) # the character just before it should be a '{', a ';' or a '}'
\s* # ...followed by any number of spaces
(?:
(?:
\+
| @include
| @warn
| @mixin
| @function
| @if
| @else
| @for
| @each
)
(?![^(:;}]*['"])
|
@import
)
''', re.VERBOSE)
| |
""" common utilities """
import itertools
from warnings import catch_warnings
import numpy as np
from pandas.compat import lrange
from pandas.core.dtypes.common import is_scalar
from pandas import Series, DataFrame, Panel, date_range, UInt64Index
from pandas.util import testing as tm
from pandas.io.formats.printing import pprint_thing
_verbose = False
def _mklbl(prefix, n):
return ["%s%s" % (prefix, i) for i in range(n)]
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class Base(object):
""" indexing comprehensive base class """
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
    def setup_method(self, method):
        """Build one Series/DataFrame/Panel fixture per index type.

        Fixtures are named ``<obj>_<typ>`` (e.g. ``frame_ints``) and are also
        collected into per-object dicts (``self.series``, ``self.frame``,
        ``self.panel``) keyed by index type.  Panel construction is wrapped in
        catch_warnings because Panel is deprecated.
        """
        # Non-contiguous integer labels, so label != position.
        self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
        self.frame_ints = DataFrame(np.random.randn(4, 4),
                                    index=lrange(0, 8, 2),
                                    columns=lrange(0, 12, 3))
        with catch_warnings(record=True):
            self.panel_ints = Panel(np.random.rand(4, 4, 4),
                                    items=lrange(0, 8, 2),
                                    major_axis=lrange(0, 12, 3),
                                    minor_axis=lrange(0, 16, 4))
        # Same labels as above but as unsigned 64-bit indexes.
        self.series_uints = Series(np.random.rand(4),
                                   index=UInt64Index(lrange(0, 8, 2)))
        self.frame_uints = DataFrame(np.random.randn(4, 4),
                                     index=UInt64Index(lrange(0, 8, 2)),
                                     columns=UInt64Index(lrange(0, 12, 3)))
        with catch_warnings(record=True):
            self.panel_uints = Panel(np.random.rand(4, 4, 4),
                                     items=UInt64Index(lrange(0, 8, 2)),
                                     major_axis=UInt64Index(lrange(0, 12, 3)),
                                     minor_axis=UInt64Index(lrange(0, 16, 4)))
        # Pure string labels.
        self.series_labels = Series(np.random.randn(4), index=list('abcd'))
        self.frame_labels = DataFrame(np.random.randn(4, 4),
                                      index=list('abcd'), columns=list('ABCD'))
        with catch_warnings(record=True):
            self.panel_labels = Panel(np.random.randn(4, 4, 4),
                                      items=list('abcd'),
                                      major_axis=list('ABCD'),
                                      minor_axis=list('ZYXW'))
        # Mixed int/str labels.
        self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
        self.frame_mixed = DataFrame(np.random.randn(4, 4),
                                     index=[2, 4, 'null', 8])
        with catch_warnings(record=True):
            self.panel_mixed = Panel(np.random.randn(4, 4, 4),
                                     items=[2, 4, 'null', 8])
        # Ascending datetime index.
        self.series_ts = Series(np.random.randn(4),
                                index=date_range('20130101', periods=4))
        self.frame_ts = DataFrame(np.random.randn(4, 4),
                                  index=date_range('20130101', periods=4))
        with catch_warnings(record=True):
            self.panel_ts = Panel(np.random.randn(4, 4, 4),
                                  items=date_range('20130101', periods=4))
        # Descending datetime index.
        dates_rev = (date_range('20130101', periods=4)
                     .sort_values(ascending=False))
        self.series_ts_rev = Series(np.random.randn(4),
                                    index=dates_rev)
        self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
                                      index=dates_rev)
        with catch_warnings(record=True):
            self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
                                      items=dates_rev)
        # Empty containers.
        self.frame_empty = DataFrame({})
        self.series_empty = Series({})
        with catch_warnings(record=True):
            self.panel_empty = Panel({})

        # form agglomerates: self.<obj>[<typ>] -> fixture (or None if absent)
        for o in self._objs:
            d = dict()
            for t in self._typs:
                d[t] = getattr(self, '%s_%s' % (o, t), None)
            setattr(self, o, d)
def generate_indices(self, f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def get_result(self, obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
# use an artifical conversion to map the key as integers to the labels
# so ix can work for comparisions
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
with catch_warnings(record=True):
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def get_value(self, f, i, values=False):
""" return the value for the location i """
# check agains values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
# check agains values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
    def check_result(self, name, method1, key1, method2, key2, typs=None,
                     objs=None, axes=None, fails=None):
        """Compare ``obj.<method1>[key1]`` against ``obj.<method2>[key2]``
        for every combination of container kind (``typs``), dimensionality
        (``objs``) and axis.
        ``fails`` may be True (an assertion failure is expected and OK) or
        an exception class/tuple that is acceptable when raised.
        """
        def _eq(t, o, a, obj, k1, k2):
            """ compare equal for these 2 keys """
            # axis out of range for this object: nothing to check
            if a is not None and a > obj.ndim - 1:
                return
            def _print(result, error=None):
                # one-line diagnostic; only emitted when _verbose is set
                if error is not None:
                    error = str(error)
                v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
                     "key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
                     (name, result, t, o, method1, method2, a, error or ''))
                if _verbose:
                    pprint_thing(v)
            try:
                # actual result under test
                rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
                # expected result; when it can't be computed there is
                # nothing to compare against, so skip this combination
                try:
                    xp = self.get_result(obj, method2, k2, a)
                except:
                    result = 'no comp'
                    _print(result)
                    return
                detail = None
                try:
                    if is_scalar(rs) and is_scalar(xp):
                        assert rs == xp
                    elif xp.ndim == 1:
                        tm.assert_series_equal(rs, xp)
                    elif xp.ndim == 2:
                        tm.assert_frame_equal(rs, xp)
                    elif xp.ndim == 3:
                        tm.assert_panel_equal(rs, xp)
                    result = 'ok'
                except AssertionError as e:
                    detail = str(e)
                    result = 'fail'
                # reverse the checks
                if fails is True:
                    if result == 'fail':
                        result = 'ok (fail)'
                _print(result)
                if not result.startswith('ok'):
                    raise AssertionError(detail)
            except AssertionError:
                raise
            except Exception as detail:
                # if we are in fails, the ok, otherwise raise it
                if fails is not None:
                    if isinstance(detail, fails):
                        result = 'ok (%s)' % type(detail).__name__
                        _print(result)
                        return
                result = type(detail).__name__
                # NOTE(review): _print returns None, so this raises
                # AssertionError(None); the details are only visible via
                # the _verbose printout above.
                raise AssertionError(_print(result, error=detail))
        if typs is None:
            typs = self._typs
        if objs is None:
            objs = self._objs
        if axes is not None:
            # normalize a scalar axis to a one-element list
            if not isinstance(axes, (tuple, list)):
                axes = [axes]
            else:
                axes = list(axes)
        else:
            # default: exercise all three possible axes
            axes = [0, 1, 2]
        # check
        for o in objs:
            if o not in self._objs:
                continue
            d = getattr(self, o)
            for a in axes:
                for t in typs:
                    if t not in self._typs:
                        continue
                    obj = d[t]
                    if obj is None:
                        continue
                    def _call(obj=obj):
                        # obj bound as a default to avoid late binding
                        obj = obj.copy()
                        k2 = key2
                        _eq(t, o, a, obj, key1, k2)
                    # Panel deprecations
                    if isinstance(obj, Panel):
                        with catch_warnings(record=True):
                            _call()
                    else:
                        _call()
| |
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_curve_geometry114
except ImportError:
bt_curve_geometry114 = sys.modules["onshape_client.oas.models.bt_curve_geometry114"]
try:
from onshape_client.oas.models import bt_curve_geometry_spline118_all_of
except ImportError:
bt_curve_geometry_spline118_all_of = sys.modules[
"onshape_client.oas.models.bt_curve_geometry_spline118_all_of"
]
class BTCurveGeometrySpline118(ModelComposed):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # this model declares no enum-restricted fields and no field validations
    allowed_values = {}
    validations = {}
    additional_properties_type = None
    @staticmethod
    def openapi_types():
        """
        This must be a class method so a model may have properties that are
        of type self, this ensures that we don't create a cyclic import
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            "bt_type": (str,),  # noqa: E501
            "control_point_count": (int,),  # noqa: E501
            "control_points": ([float],),  # noqa: E501
            "degree": (int,),  # noqa: E501
            "is_periodic": (bool,),  # noqa: E501
            "is_rational": (bool,),  # noqa: E501
            "knots": ([float],),  # noqa: E501
        }
    @staticmethod
    def discriminator():
        # no discriminator: subtypes are not selected by a field value
        return None
    # python attribute name -> JSON key in the API payload
    attribute_map = {
        "bt_type": "btType",  # noqa: E501
        "control_point_count": "controlPointCount",  # noqa: E501
        "control_points": "controlPoints",  # noqa: E501
        "degree": "degree",  # noqa: E501
        "is_periodic": "isPeriodic",  # noqa: E501
        "is_rational": "isRational",  # noqa: E501
        "knots": "knots",  # noqa: E501
    }
    # internal bookkeeping attributes stored directly on the instance;
    # everything else is routed through the composed-model machinery
    required_properties = set(
        [
            "_data_store",
            "_check_type",
            "_from_server",
            "_path_to_item",
            "_configuration",
            "_composed_instances",
            "_var_name_to_model_instances",
            "_additional_properties_model_instances",
        ]
    )
    def __init__(
        self,
        _check_type=True,
        _from_server=False,
        _path_to_item=(),
        _configuration=None,
        **kwargs
    ):  # noqa: E501
        """bt_curve_geometry_spline118.BTCurveGeometrySpline118 - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _from_server (bool): True if the data is from the server
                                False if the data is from the client (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            bt_type (str): [optional]  # noqa: E501
            control_point_count (int): [optional]  # noqa: E501
            control_points ([float]): [optional]  # noqa: E501
            degree (int): [optional]  # noqa: E501
            is_periodic (bool): [optional]  # noqa: E501
            is_rational (bool): [optional]  # noqa: E501
            knots ([float]): [optional]  # noqa: E501
        """
        self._data_store = {}
        self._check_type = _check_type
        self._from_server = _from_server
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        constant_args = {
            "_check_type": _check_type,
            "_path_to_item": _path_to_item,
            "_from_server": _from_server,
            "_configuration": _configuration,
        }
        # this model has no required (positional) properties
        required_args = {}
        # remove args whose value is Null because they are unset
        required_arg_names = list(required_args.keys())
        for required_arg_name in required_arg_names:
            if required_args[required_arg_name] is nulltype.Null:
                del required_args[required_arg_name]
        model_args = {}
        model_args.update(required_args)
        model_args.update(kwargs)
        # distribute incoming values across the composed (allOf) models
        composed_info = validate_get_composed_info(constant_args, model_args, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        unused_args = composed_info[3]
        for var_name, var_value in required_args.items():
            setattr(self, var_name, var_value)
        for var_name, var_value in six.iteritems(kwargs):
            if (
                var_name in unused_args
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and not self._additional_properties_model_instances
            ):
                # discard variable.
                continue
            setattr(self, var_name, var_value)
    @staticmethod
    def _composed_schemas():
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error beause the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        return {
            "anyOf": [],
            "allOf": [
                bt_curve_geometry114.BTCurveGeometry114,
                bt_curve_geometry_spline118_all_of.BTCurveGeometrySpline118AllOf,
            ],
            "oneOf": [],
        }
| |
import os
import os.path
import errno
import sys
import traceback
import zipfile
import platform
import re
import htmlentitydefs
import json
from pytz import timezone
import datetime
import time
from lxml import html, etree
import scrapelib
import pprint
import logging
import subprocess
import smtplib
import email.utils
from email.mime.text import MIMEText
import getpass
# read in an opt-in config file for changing directories and supplying email settings
# returns None if it's not there, and this should always be handled gracefully
path = "config.yml"
if os.path.exists(path):
    # Don't use a cached config file, just in case, and direct_yaml_load is not yet defined.
    import yaml
    # safe_load: a plain config file needs no arbitrary Python object
    # construction; the context manager closes the handle the old bare
    # open() leaked
    with open(path) as config_file:
        config = yaml.safe_load(config_file)
else:
    config = None
# serialized timestamps are expressed in US Eastern time (see format_datetime)
eastern_time_zone = timezone('US/Eastern')
# scraper should be instantiated at class-load time, so that it can rate limit appropriately
scraper = scrapelib.Scraper(requests_per_minute=120, retry_attempts=3)
# identify ourselves to the servers we scrape
scraper.user_agent = "unitedstates/congress (https://github.com/unitedstates/congress)"
def format_datetime(obj):
    """Serialize dates/datetimes to ISO 8601 (datetimes localized to US
    Eastern, sub-second precision dropped); strings pass through
    unchanged; anything else becomes None."""
    if isinstance(obj, datetime.datetime):
        eastern = eastern_time_zone.localize(obj.replace(microsecond=0))
        return eastern.isoformat()
    if isinstance(obj, datetime.date):
        return obj.isoformat()
    if isinstance(obj, (str, unicode)):
        return obj
    return None
def current_congress():
    """Return the Congress number for the current legislative year."""
    return congress_from_legislative_year(current_legislative_year())

def congress_from_legislative_year(year):
    """Map a legislative year to its Congress number.

    Uses floor division (``//``) so the result stays an int on both
    Python 2 and 3 -- the old ``/`` silently produced a float under
    Python 3's true division.
    """
    return ((year + 1) // 2) - 894

def current_legislative_year(date=None):
    """Return the legislative year for ``date`` (default: now).

    Legislative years run from noon January 3 to noon January 3, so the
    first days of January still belong to the previous legislative year.
    """
    if not date:
        date = datetime.datetime.now()

    if date.month != 1:
        return date.year
    # January 1-2, and January 3 before noon, belong to the prior year
    if date.day in (1, 2):
        return date.year - 1
    if date.day == 3 and date.hour < 12:
        return date.year - 1
    return date.year

def get_congress_first_year(congress):
    """Return the first calendar year of the given Congress."""
    return (((int(congress) + 894) * 2) - 1)

# get the three calendar years that the Congress extends through (Jan 3 to Jan 3).
def get_congress_years(congress):
    """Return the (first, middle, last) calendar years of ``congress``."""
    y1 = get_congress_first_year(congress)
    return (y1, y1 + 1, y1 + 2)
# Get a list of Congresses associated with a particular term.
# XXX: This can be highly unreliable and may be deeply flawed.
# XXX: This would be much simpler if we already included Congresses in the data.
def get_term_congresses(term):
    """Return the Congress numbers a term spans, or [] when the term's
    dates don't line up with Congress boundaries."""
    start_year = int(format_datetime(term["start"])[:4])
    end_year = int(format_datetime(term["end"])[:4])

    start_congress = congress_from_legislative_year(start_year)
    first_year = get_congress_years(start_congress)[0]

    # Senators serve three Congresses; presidents/vice-presidents (and
    # Puerto Rico's resident commissioner) serve two; everyone else one.
    if term["type"] in ["sen"]:
        span = 3
    elif term["type"] in ["prez", "viceprez"] or term["state"] in ["PR"]:
        span = 2
    else:
        span = 1

    congresses = [start_congress + offset for offset in range(span)]
    last_year = get_congress_years(congresses[-1])[2]

    valid_congresses = (start_year >= first_year) and (end_year <= last_year)
    # if not valid_congresses:
    #     print term["type"], start_congress, (start_year, first_year), (end_year, last_year)
    return congresses if valid_congresses else []
# bill_type, bill_number, congress
def split_bill_id(bill_id):
    """Split e.g. 'hr1234-115' into ('hr', '1234', '115').

    Raw string: the old plain literal relied on '\\d' surviving
    unrecognized-escape handling, which warns on modern Python.
    Raises AttributeError when the id doesn't match.
    """
    return re.match(r"^([a-z]+)(\d+)-(\d+)$", bill_id).groups()

# "hjres1234-115"
def build_bill_id(bill_type, bill_number, congress):
    """Inverse of split_bill_id."""
    return "%s%s-%s" % (bill_type, bill_number, congress)

# bill_type, bill_number, congress, version_code
def split_bill_version_id(bill_version_id):
    """Split e.g. 'hjres1234-115-enr' into (type, number, congress, version)."""
    return re.match(r"^([a-z]+)(\d+)-(\d+)-([a-z\d]+)$", bill_version_id).groups()

# "hjres1234-115-enr"
def build_bill_version_id(bill_type, bill_number, congress, version_code):
    """Inverse of split_bill_version_id."""
    return "%s%s-%s-%s" % (bill_type, bill_number, congress, version_code)
def split_vote_id(vote_id):
    """Split e.g. 'h10-113.2013' into (chamber, number, congress, session)."""
    # Sessions are either four-digit years for modern day votes or a digit or letter
    # for historical votes before sessions were basically calendar years.
    # NOTE(review): the '.' before the session is unescaped, so any single
    # separator character is accepted -- kept as-is to preserve behavior.
    return re.match(r"^(h|s)(\d+)-(\d+).(\d\d\d\d|[0-9A-Z])$", vote_id).groups()

# nomination_type (always PN), nomination_number, congress
# nomination_number is usually a number, but can be hyphenated, e.g. PN64-01-111
# which would produce a nomination_number of "64-01"
def split_nomination_id(nomination_id):
    """Split e.g. 'PN64-01-111' into (type, number, congress);
    returns (None, None, None) and logs when parsing fails."""
    try:
        # [A-Za-z] replaces the original [A-z], which also matched the
        # punctuation between 'Z' and 'a' in ASCII; 'as'-less Python-2
        # except syntax dropped too
        return re.match(r"^([A-Za-z]{2})([\d-]+)-(\d+)$", nomination_id).groups()
    except Exception:
        logging.error("Unable to parse %s" % nomination_id)
        return (None, None, None)
def process_set(to_fetch, fetch_func, options, *extra_args):
    """Run ``fetch_func(id, options, *extra_args)`` for each id in ``to_fetch``.

    Each call is expected to return a dict with 'ok' (bool), 'saved'
    (bool) and, when not saved, a 'reason'. Collected errors are emailed
    to the admin; set options['raise'] to propagate exceptions instead.
    Returns the ids that were saved or skipped (i.e. all the OK ones).
    """
    errors = []
    saved = []
    skips = []

    for id in to_fetch:
        try:
            results = fetch_func(id, options, *extra_args)
        except Exception as e:
            # ('except Exception, e' was Python-2-only syntax; 'as' is
            # what the rest of this file already uses)
            if options.get('raise', False):
                raise
            else:
                errors.append((id, e, format_exception(e)))
                continue

        if results.get('ok', False):
            if results.get('saved', False):
                saved.append(id)
                logging.info("[%s] Updated" % id)
            else:
                skips.append(id)
                logging.warn("[%s] Skipping: %s" % (id, results['reason']))
        else:
            errors.append((id, results, None))
            logging.error("[%s] Error: %s" % (id, results['reason']))

    if len(errors) > 0:
        message = "\nErrors for %s items:\n" % len(errors)
        for id, error, msg in errors:
            message += "\n\n"
            if isinstance(error, Exception):
                message += "[%s] Exception:\n\n" % id
                message += msg
            else:
                message += "[%s] %s" % (id, error)

        admin(message)  # email if possible
        logging.warning("\nErrors for %s." % len(errors))

    logging.warning("Skipped %s." % len(skips))
    logging.warning("Saved data for %s." % len(saved))

    return saved + skips  # all of the OK's
# Download file at `url`, cache to `destination`.
# Takes many options to customize behavior.
# open ZipFile handles keyed by zip path; kept because instantiation is slow
_download_zip_files = {}
def download(url, destination=None, options={}):
    """Fetch `url`, caching the body under `destination`, and return it.

    Returns the body (unicode unless options['binary'] is set), True when
    the caller declared it doesn't need content, or None on failure or an
    empty body.
    NOTE(review): `options={}` is a mutable default; it is only read here,
    never mutated, so it is harmless in practice.
    """
    # uses cache by default, override (True) to ignore
    force = options.get('force', False)
    # saves in cache dir by default, override (False) to save to exact destination
    to_cache = options.get('to_cache', True)
    # unescapes HTML encoded characters by default, set this (True) to not do that
    is_binary = options.get('binary', False)
    # used by test suite to use special (versioned) test cache dir
    test = options.get('test', False)
    # if need a POST request with data
    postdata = options.get('postdata', False)
    timeout = float(options.get('timeout', 30))  # The low level socket api requires a float
    urlopen_kwargs = {'timeout': timeout}
    # caller cares about actually bytes or only success/fail
    needs_content = options.get('needs_content', True) or not is_binary or postdata
    # form the path to the file if we intend on saving it to disk
    if destination:
        if to_cache:
            if test:
                cache = test_cache_dir()
            else:
                cache = cache_dir()
            cache_path = os.path.join(cache, destination)
        else:
            cache_path = destination
    # If we are working in the cache directory, look for a zip file
    # anywhere along the path like "cache/93/bills.zip", and see if
    # the file is already cached inside it (e.g. as 'bills/pages/...").
    # If it is, and force is true, then raise an Exception because we
    # can't update the ZIP file with new content (I imagine it would
    # be very slow). If force is false, return the content from the
    # archive.
    if destination and to_cache:
        dparts = destination.split(os.sep)
        for i in xrange(len(dparts) - 1):
            # form the ZIP file name and test if it exists...
            zfn = os.path.join(cache, *dparts[:i + 1]) + ".zip"
            if not os.path.exists(zfn):
                continue
            # load and keep the ZIP file instance in memory because it's slow to instantiate this object
            zf = _download_zip_files.get(zfn)
            if not zf:
                zf = zipfile.ZipFile(zfn, "r")
                _download_zip_files[zfn] = zf
                logging.warn("Loaded: %s" % zfn)
            # see if the inner file exists, and if so read the bytes
            try:
                zfn_inner = os.path.join(*dparts[i:])
                body = zf.read(zfn_inner)
            except KeyError:
                # does not exist
                continue
            if not test:
                logging.info("Cached: (%s, %s)" % (zfn + "#" + zfn_inner, url))
            if force:
                raise Exception("Cannot re-download a file already cached to a ZIP file.")
            if not is_binary:
                body = body.decode("utf8")
                body = unescape(body)
            return body
    # Load the file from disk if it's already been downloaded and force is False.
    if destination and (not force) and os.path.exists(cache_path):
        if not test:
            logging.info("Cached: (%s, %s)" % (cache_path, url))
        if not needs_content:
            return True
        with open(cache_path, 'r') as f:
            body = f.read()
        if not is_binary:
            body = body.decode("utf8")
    # Download from the network and cache to disk.
    else:
        try:
            logging.info("Downloading: %s" % url)
            if postdata:
                response = scraper.urlopen(url, 'POST', postdata, **urlopen_kwargs)
            else:
                # If we're just downloading the file and the caller doesn't
                # need the response data, then starting wget to download the
                # file is much faster for large files. Don't know why. Something
                # hopefully we can improve in scrapelib in the future.
                #
                # needs_content is currently only set to false when downloading
                # bill text files like PDFs.
                #
                # Skip this fast path if wget is not present in its expected location.
                with open(os.devnull, 'w') as tempf:
                    if platform.system() == 'Windows':
                        wget_exists = (subprocess.call("where wget", stdout=tempf, stderr=tempf, shell=True) == 0)
                    else:
                        wget_exists = (subprocess.call("which wget", stdout=tempf, stderr=tempf, shell=True) == 0)
                if not needs_content and wget_exists:
                    mkdir_p(os.path.dirname(cache_path))
                    if subprocess.call(["wget", "-q", "-O", cache_path, url]) == 0:
                        return True
                    else:
                        # wget failed. when that happens it leaves a zero-byte file on disk, which
                        # for us means we've created an invalid file, so delete it.
                        os.unlink(cache_path)
                        return None
                response = scraper.urlopen(url, **urlopen_kwargs)
            if not is_binary:
                body = response  # a subclass of a 'unicode' instance
                if not isinstance(body, unicode):
                    raise ValueError("Content not decoded.")
            else:
                body = response.bytes  # a 'str' instance
                if isinstance(body, unicode):
                    raise ValueError("Binary content improperly decoded.")
        except scrapelib.HTTPError as e:
            logging.error("Error downloading %s:\n\n%s" % (url, format_exception(e)))
            return None
        # don't allow 0-byte files
        if (not body) or (not body.strip()):
            return None
        # cache content to disk
        if destination:
            write(body if is_binary else body.encode("utf8"), cache_path)
    if not is_binary:
        body = unescape(body)
    return body
def write(content, destination, options={}):
    """Write `content` to `destination`, creating parent directories.

    With options={"diff": True}, nothing is written; instead a unified
    diff against the existing file is shown on the console (the
    updated_at stamp is normalized first so a changed timestamp alone
    doesn't register as a change).
    NOTE(review): `options={}` is a mutable default; only read here.
    """
    if options.get("diff"):
        # Instead of writing the file, do a comparison with what's on disk
        # to test any changes. But be nice and replace any update date with
        # what's in the previous file so we avoid spurious changes. Use
        # how updated_at appears in the JSON and in the XML.
        if os.path.exists(destination):
            with open(destination) as f:
                existing_content = f.read()
            for pattern in ('"updated_at": ".*?"', 'updated=".*?"'):
                m1 = re.search(pattern, existing_content)
                m2 = re.search(pattern, content)
                if m1 and m2:
                    content = content.replace(m2.group(0), m1.group(0))

            # Avoid writing to disk and spawning `diff` by checking if
            # the files match in memory.
            if content == existing_content:
                return

        # Shell `diff` and let it display output directly to the console.
        # Write `content` to disk first so diff can see it. Maybe more
        # efficient to pipe?
        fn = "/tmp/congress-changed-file"
        with open(fn, 'w') as f:
            f.write(content)
        os.system("diff -u %s %s" % (destination, fn))
        os.unlink(fn)
        return

    # Save the content to disk; the context manager closes the handle even
    # if the write raises (the old open/write/close triple leaked it).
    mkdir_p(os.path.dirname(destination))
    with open(destination, 'w') as f:
        f.write(content)
def write_json(data, destination):
    """Serialize `data` as stable, human-readable JSON and write it to
    `destination` (dates serialized via format_datetime)."""
    serialized = json.dumps(data,
                            sort_keys=True,
                            indent=2,
                            default=format_datetime)
    return write(serialized, destination)
def read(destination):
    """Return the contents of `destination`, or None when it doesn't exist."""
    if not os.path.exists(destination):
        return None
    with open(destination) as f:
        return f.read()
# dict1 gets overwritten with anything in dict2
def merge(dict1, dict2):
    """Return a new dict with dict2's entries overriding dict1's.

    The old implementation concatenated .items() lists, which built two
    throwaway lists and raises TypeError under Python 3 (dict views
    don't support +); copy-then-update is equivalent and works on both.
    """
    combined = dict(dict1)
    combined.update(dict2)
    return combined

# de-dupe a list, taken from:
# http://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-python-whilst-preserving-order
def uniq(seq):
    """Return seq's elements, first occurrence only, preserving order."""
    seen = set()
    seen_add = seen.add  # bound-method hoist: avoids an attribute lookup per element
    return [x for x in seq if x not in seen and not seen_add(x)]
import os
import errno
# mkdir -p in python, from:
# http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
def mkdir_p(path):
    """Create `path` and any missing parents; no error if it already exists."""
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        if exc.errno != errno.EEXIST:
            raise
def xpath_regex(doc, element, pattern):
    """Return all `element` nodes in `doc` whose text matches `pattern`,
    using the EXSLT regular-expression extension."""
    query = "//%s[re:match(text(), '%s')]" % (element, pattern)
    return doc.xpath(
        query,
        namespaces={"re": "http://exslt.org/regular-expressions"})
# taken from http://effbot.org/zone/re-sub.htm#unescape-html
def unescape(text):
    """Replace HTML character/entity references in `text` with the
    characters they name, then strip ASCII control characters (except
    tab, newline and carriage return).
    (Python 2 only: relies on the `unichr` builtin.)
    """
    def remove_unicode_control(str):
        remove_re = re.compile(u'[\x00-\x08\x0B-\x0C\x0E-\x1F\x7F]')
        return remove_re.sub('', str)
    def fixup(m):
        text = m.group(0)
        if text[:2] == "&#":
            # character reference
            try:
                if text[:3] == "&#x":
                    # hexadecimal form, e.g. &#x2019;
                    return unichr(int(text[3:-1], 16))
                else:
                    # decimal form, e.g. &#8217;
                    return unichr(int(text[2:-1]))
            except ValueError:
                pass
        else:
            # named entity
            try:
                text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
            except KeyError:
                pass
        return text  # leave as is
    text = re.sub("&#?\w+;", fixup, text)
    text = remove_unicode_control(text)
    return text
def extract_bills(text, session):
    """Find bill references like 'H.R. 1234' in `text` and return
    normalized ids like 'hr1234-<session>', de-duplicated and in order
    of first appearance."""
    bill_ids = []

    # raw string: the old plain literal relied on '\.'-style sequences
    # surviving unrecognized-escape handling, which warns on modern Python
    p = re.compile(r'((S\.|H\.)(\s?J\.|\s?R\.|\s?Con\.| ?)(\s?Res\.)*\s?\d+)',
                   flags=re.IGNORECASE)
    bill_matches = p.findall(text)

    if bill_matches:
        for b in bill_matches:
            # "H.R. 1234" -> "hr1234-<session>" ("Con" abbreviates to "c")
            bill_text = "%s-%s" % (b[0].lower().replace(" ", '').replace('.', '').replace("con", "c"), session)
            if bill_text not in bill_ids:
                bill_ids.append(bill_text)

    return bill_ids
# uses config values if present
def cache_dir():
    """Directory for cached downloads; config['output']['cache'] overrides
    the default "cache"."""
    if config:
        output = config.get('output', None)
        if output and output.get('cache', None):
            return output.get('cache', None)
    return "cache"

def test_cache_dir():
    """Fixed, versioned cache directory used by the test suite."""
    return "test/fixtures/cache"

# uses config values if present
def data_dir():
    """Directory for output data; config['output']['data'] overrides the
    default "data"."""
    if config:
        output = config.get('output', None)
        if output and output.get('data', None):
            return output.get('data', None)
    return "data"
# if email settings are supplied, email the text - otherwise, just print it
def admin(body):
    """Report `body` (a message or an Exception) to the admins.

    Always logs it; additionally emails it when config['email'] is set.
    Never raises: a failure while reporting is printed and swallowed so
    we don't loop on errors about reporting errors.
    """
    try:
        if isinstance(body, Exception):
            body = format_exception(body)
        logging.error(body)  # always print it

        if config:
            details = config.get('email', None)
            if details:
                send_email(body)
    except Exception as exception:
        # single-argument print() works on both Python 2 and 3; the old
        # print statements were Python-2-only syntax
        print("Exception logging message to admin, halting as to avoid loop")
        print(format_exception(exception))
def format_exception(exception):
    """Render the currently-handled exception's traceback as a string.

    NOTE(review): the `exception` argument is ignored; sys.exc_info() is
    consulted instead, so this only gives useful output while an
    exception is actually being handled.
    """
    exc_type, exc_value, exc_traceback = sys.exc_info()
    lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
    return "\n".join(lines)
# this should only be called if the settings are definitely there
def send_email(message):
    """Send `message` to the admin address in config['email'].

    Assumes config['email'] exists with keys: to, from, from_name,
    subject, hostname, starttls, user_name, password.
    """
    settings = config['email']

    # adapted from http://www.doughellmann.com/PyMOTW/smtplib/
    msg = MIMEText(message)
    msg.set_unixfrom('author')
    msg['To'] = email.utils.formataddr(('Recipient', settings['to']))
    msg['From'] = email.utils.formataddr((settings['from_name'], settings['from']))
    msg['Subject'] = settings['subject']

    server = smtplib.SMTP(settings['hostname'])
    try:
        server.ehlo()
        # upgrade to TLS when requested and the server advertises support
        if settings['starttls'] and server.has_extn('STARTTLS'):
            server.starttls()
            # re-identify over the now-encrypted channel
            server.ehlo()

        server.login(settings['user_name'], settings['password'])
        server.sendmail(settings['from'], [settings['to']], msg.as_string())
    finally:
        # always close the SMTP session, even when sending failed
        server.quit()

    logging.info("Sent email to %s" % settings['to'])
# our bill-type slug -> (THOMAS 2-letter code, THOMAS display abbreviation)
thomas_types = {
    'hr': ('HR', 'H.R.'),
    'hres': ('HE', 'H.RES.'),
    'hjres': ('HJ', 'H.J.RES.'),
    'hconres': ('HC', 'H.CON.RES.'),
    's': ('SN', 'S.'),
    'sres': ('SE', 'S.RES.'),
    'sjres': ('SJ', 'S.J.RES.'),
    'sconres': ('SC', 'S.CON.RES.'),
    'hamdt': ('HZ', 'H.AMDT.'),
    'samdt': ('SP', 'S.AMDT.'),
    'supamdt': ('SU', 'S.UP.AMDT.'),
}
thomas_types_2 = dict((v[0], k) for (k, v) in thomas_types.items())  # map e.g. { SE: sres, ...}
# cached committee map to map names to IDs
committee_names = {}
# get the mapping from THOMAS's committee names to THOMAS's committee IDs
# found on the advanced search page. committee_names[congress][name] = ID
# with subcommittee names as the committee name plus a pipe plus the subcommittee
# name.
def fetch_committee_names(congress, options):
    """Populate the module-level committee_names map for `congress`.

    Parses THOMAS's advanced-search page to map the committee names that
    appear on bill pages to the committee IDs listed on the search page.
    options['body'] may supply the page body directly (for fixtures).
    """
    congress = int(congress)

    # Parse the THOMAS advanced search pages for the names that THOMAS uses for
    # committees on bill pages, and map those to the IDs for the committees that are
    # listed on the advanced search pages (but aren't shown on bill pages).
    if not options.get('test', False):
        logging.info("[%d] Fetching committee names..." % congress)

    # allow body to be passed in from fixtures
    # ('in' replaces dict.has_key, which no longer exists on Python 3)
    if 'body' in options:
        body = options['body']
    else:
        body = download(
            "http://thomas.loc.gov/home/LegislativeData.php?&n=BSS&c=%d" % congress,
            "%s/meta/thomas_committee_names.html" % congress,
            options)

    # (the inner loop variable used to be named `options`, shadowing the
    # parameter above; renamed for clarity -- behavior unchanged)
    for chamber, chamber_html in re.findall('>Choose (House|Senate) Committees</option>(.*?)</select>', body, re.I | re.S):
        for name, id in re.findall(r'<option value="(.*?)\{(.*?)}">', chamber_html, re.I | re.S):
            id = str(id).upper()
            name = name.strip().replace(" ", " ")  # weirdness
            if id.endswith("00"):
                # Map chamber + committee name to its ID, minus the 00 at the end. On bill pages,
                # committees appear as e.g. "House Finance." Except the JCSE.
                if id != "JCSE00":
                    name = chamber + " " + name

                # Correct for some oddness on THOMAS (but not on Congress.gov): The House Committee
                # on House Administration appears just as "House Administration" and in the 104th/105th
                # Congresses appears as "House Oversight" (likewise the full name is House Committee
                # on House Oversight --- it's the House Administration committee still).
                if name == "House House Administration":
                    name = "House Administration"
                if name == "House House Oversight":
                    name = "House Oversight"

                committee_names[name] = id[0:-2]
            else:
                # map committee ID + "|" + subcommittee name to the zero-padded subcommittee numeric ID
                committee_names[id[0:-2] + "|" + name] = id[-2:]

    # Correct for a limited number of other ways committees appear, owing probably to the
    # committee name being changed mid-way through a Congress.
    if congress == 95:
        committee_names["House Intelligence (Select)"] = committee_names["House Intelligence (Permanent Select)"]
    if congress == 96:
        committee_names["Senate Human Resources"] = "SSHR"
    if congress == 97:
        committee_names["Senate Small Business (Select)"] = committee_names["Senate Small Business"]
    if congress == 98:
        committee_names["Senate Indian Affairs (Select)"] = committee_names["Senate Indian Affairs (Permanent Select)"]
    if congress == 100:
        committee_names["HSPO|Hoc Task Force on Presidential Pay Recommendation"] = committee_names["HSPO|Ad Hoc Task Force on Presidential Pay Recommendation"]
    if congress == 103:
        committee_names["Senate Indian Affairs (Permanent Select)"] = committee_names["Senate Indian Affairs"]
    if congress == 108:
        # This appears to be a mistake, a subcommittee appearing as a full committee. Map it to
        # the full committee for now.
        committee_names["House Antitrust (Full Committee Task Force)"] = committee_names["House Judiciary"]
        committee_names["House Homeland Security"] = committee_names["House Homeland Security (Select)"]
    if congress in range(108, 113):
        committee_names["House Intelligence"] = committee_names["House Intelligence (Permanent Select)"]
def make_node(parent, tag, text, **attrs):
    """Make a node in an XML document.

    Appends a new `tag` element with the given text under `parent`.
    None-valued attributes are skipped, datetimes are serialized via
    format_datetime, and "___" is stripped from attribute names (a
    trick for passing reserved words as keyword arguments).
    """
    node = etree.Element(tag)
    parent.append(node)
    node.text = text
    for key, value in attrs.items():
        if value is None:
            continue
        if isinstance(value, datetime.datetime):
            value = format_datetime(value)
        node.set(key.replace("___", ""), value)
    return node
# Correct mistakes on THOMAS
def thomas_corrections(thomas_id):
    """Map known-incorrect THOMAS member IDs to the correct ones."""
    corrections = {
        "02188": "01728",  # C.A. Dutch Ruppersberger
        "01594": "02085",  # Pat Toomey
    }
    return corrections.get(thomas_id, thomas_id)

# Return a subset of a mapping type
def slice_map(m, *args):
    """Return a new dict containing only the `args` keys present in `m`."""
    return dict((key, m[key]) for key in args if key in m)
# Load a YAML file directly.
def direct_yaml_load(filename):
    """Parse `filename` as YAML, using the fast C loader when available."""
    import yaml
    try:
        # the C loader is much faster when libyaml is installed
        from yaml import CLoader as Loader
    except ImportError:
        from yaml import Loader
    # close the handle (the old bare open() leaked the file descriptor);
    # the unused CDumper import is dropped too
    with open(filename) as f:
        return yaml.load(f, Loader=Loader)
# Load a pickle file.
def pickle_load(filename):
    """Unpickle and return the contents of `filename`.

    Opens in binary mode: pickle data is bytes, and the old text-mode
    open broke under Python 3 (and corrupts data on Windows). The
    context manager also closes the handle the old bare open() leaked.
    """
    import pickle
    with open(filename, "rb") as f:
        return pickle.load(f)

# Write to a pickle file.
def pickle_write(data, filename):
    """Pickle `data` to `filename`, creating parent directories."""
    import pickle
    mkdir_p(os.path.dirname(filename))
    # binary mode + context manager, mirroring pickle_load
    with open(filename, "wb") as f:
        return pickle.dump(data, f)

# Get the hash used to verify the contents of a file.
def get_file_hash(filename):
    """Return the SHA-1 hex digest of the file's raw bytes."""
    import hashlib
    # binary mode: hashlib hashes bytes, and text mode would alter
    # line endings on Windows (and fail outright on Python 3)
    with open(filename, "rb") as f:
        return hashlib.sha1(f.read()).hexdigest()

# Get the location of the cached version of a file.
def get_cache_filename(filename):
    """Return the path of the pickle cache entry for `filename`."""
    return os.path.join(cache_dir(), filename + '.pickle')

# Check if the cached file is newer.
def check_cached_file(filename, cache_filename):
    """True when `cache_filename` exists and is newer than `filename`."""
    return (os.path.exists(cache_filename) and os.stat(cache_filename).st_mtime > os.stat(filename).st_mtime)
# Problem with finding a cache entry.
class CacheError(LookupError):
    """Raised when a cache entry is missing or fails its hash check."""
    pass

# Load a cached file.
def cache_load(cache_filename, file_hash):
    """Return the cached data in `cache_filename` if it matches `file_hash`.

    Raises CacheError when the cache file is unreadable or stale, and
    TypeError when the file doesn't have the cache structure at all.
    """
    try:
        entry = pickle_load(cache_filename)
    except IOError:
        raise CacheError("Could not retrieve potential cache file: %s" % (cache_filename))
    if "hash" not in entry or "data" not in entry:
        # A cache file has a specific structure.
        raise TypeError("Not a cache file: %s" % (cache_filename))
    if entry["hash"] != file_hash:
        # Hash mismatch: this is a cache entry for something else.
        raise CacheError("Hashes do not match: %s, %s" % (file_hash, entry["hash"]))
    return entry["data"]

# Cache a file.
def cache_write(file_data, filename, file_hash):
    """Write `file_data` and its `file_hash` to `filename` as a cache entry."""
    return pickle_write({"hash": file_hash, "data": file_data}, filename)
# Attempt to load a cached version of a YAML file before loading the YAML file directly.
def yaml_load(filename):
    """Load `filename` as YAML, using (and refreshing) a pickle cache
    keyed by the file's content hash."""
    file_hash = get_file_hash(filename)
    cache_filename = get_cache_filename(filename)

    try:
        yaml_data = cache_load(cache_filename, file_hash)
    except CacheError:
        # No usable cached version: parse the YAML directly...
        logging.warn("Using original YAML file...")
        yaml_data = direct_yaml_load(filename)
        # ...and cache it so the next load is fast.
        cache_write(yaml_data, cache_filename, file_hash)
        return yaml_data

    # The cached version was valid, so we used it.
    logging.info("Using cached YAML file...")
    return yaml_data
# Make sure we have the congress-legislators repository available.
has_congress_legislators_repo = False
def require_congress_legislators_repo():
    """Clone (and by default update) the congress-legislators repo into the
    working directory; runs the git work at most once per process."""
    global has_congress_legislators_repo
    # Once we have the congress-legislators repo, we don't need to keep getting it.
    if has_congress_legislators_repo:
        return
    # Clone the congress-legislators repo if we don't have it.
    if not os.path.exists("congress-legislators"):
        logging.warn("Cloning the congress-legislators repo...")
        os.system("git clone -q --depth 1 https://github.com/unitedstates/congress-legislators congress-legislators")
    # set UPDATE_CONGRESS_LEGISLATORS=NO in the environment to skip the pull
    if os.environ.get("UPDATE_CONGRESS_LEGISLATORS") != "NO":
        # Update the repo so we have the latest.
        logging.warn("Updating the congress-legislators repo...")
        # these two == git pull, but git pull ignores -q on the merge part so is less quiet
        os.system("cd congress-legislators; git fetch -pq; git merge --ff-only -q origin/master")
    # We now have the congress-legislators repo.
    has_congress_legislators_repo = True
# Lazily-built index of (legislator, term) pairs keyed by Congress number.
lookup_legislator_cache = []

def lookup_legislator(congress, role_type, name, state, party, when, id_requested, exclude=None):
    """Match a legislator by name/state/party/date and return the requested ID.

    This is a basic lookup given the legislator's name, state, party, and the
    date of the vote, limited to terms overlapping *congress*. Returns the
    requested ID, or None when zero or multiple candidates match.

    :param congress: Congress number used to narrow the candidate set.
    :param role_type: term type to match (e.g. 'rep'/'sen').
    :param name: "Last, First"-formatted name string.
    :param state: state code to match against the term.
    :param party: single-letter party code (first letter of term['party']).
    :param when: date of the vote (datetime/date, or ISO-format string).
    :param id_requested: which ID namespace to return (e.g. 'bioguide').
    :param exclude: IDs already matched, for process-of-elimination lookups.
        (Default is a fresh empty set; a mutable default would be shared.)
    """
    if exclude is None:
        exclude = set()
    # On the first load, cache all of the legislators' terms in memory.
    # Group by Congress so we can limit our search later to be faster.
    global lookup_legislator_cache
    if not lookup_legislator_cache:
        require_congress_legislators_repo()
        lookup_legislator_cache = {}  # from Congress number to list of (moc,term) tuples that might be in that Congress
        for filename in ("legislators-historical", "legislators-current"):
            for moc in yaml_load("congress-legislators/%s.yaml" % (filename)):
                for term in moc["terms"]:
                    for c in xrange(congress_from_legislative_year(int(term['start'][0:4])) - 1,
                                    congress_from_legislative_year(int(term['end'][0:4])) + 1 + 1):
                        lookup_legislator_cache.setdefault(c, []).append((moc, term))

    def to_ascii(name):
        # Strip hyphens and combining marks so accented names compare equal
        # to their plain-ASCII spellings.
        name = name.replace("-", " ")
        if not isinstance(name, unicode):
            return name
        import unicodedata
        return u"".join(c for c in unicodedata.normalize('NFKD', name) if not unicodedata.combining(c))

    # Scan all of the terms that cover 'when' for a match.
    if isinstance(when, datetime.datetime):
        when = when.date()
    when = when.isoformat()
    name_parts = to_ascii(name).split(", ", 1)
    matches = []
    for moc, term in lookup_legislator_cache[congress]:
        # Make sure the date is surrounded by the term start/end dates.
        if term['start'] > when:
            continue  # comparing ISO-formatted date strings
        if term['end'] < when:
            continue  # comparing ISO-formatted date strings
        # Compare the role type, state, and party, except for people who we know changed party.
        if term['type'] != role_type:
            continue
        if term['state'] != state:
            continue
        if term['party'][0] != party and name not in ("Laughlin", "Crenshaw", "Goode", "Martinez", "Parker", "Emerson", "Tauzin", "Hayes", "Deal", "Forbes"):
            continue
        # When doing process-of-elimination matching, don't match on people we've already seen.
        if moc["id"].get(id_requested) in exclude:
            continue
        # Compare the last name. Allow "Chenoweth" to match "Chenoweth Hage", but also
        # allow "Millender McDonald" to match itself.
        for name_info_rec in [moc['name']] + moc.get('other_names', []):
            # for other_names, check that the record covers the right date range
            if 'start' in name_info_rec and name_info_rec['start'] > when:
                continue  # comparing ISO-formatted date strings
            if 'end' in name_info_rec and name_info_rec['end'] < when:
                continue  # comparing ISO-formatted date strings
            # in order to process an other_name we have to go like this...
            name_info = dict(moc['name'])  # clone
            name_info.update(name_info_rec)  # override with the other_name information
            # check last name
            if name_parts[0] != to_ascii(name_info['last']) \
               and name_parts[0] not in to_ascii(name_info['last']).split(" "):
                continue  # no match
            # Compare the first name. Allow it to match either the first or middle name,
            # and an initialized version of the first name (i.e. "E." matches "Eddie").
            # Test the whole string (so that "Jo Ann" is compared to "Jo Ann") but also
            # the first part of a string split (so "E. B." is compared as "E." to "Eddie").
            first_names = (to_ascii(name_info['first']), to_ascii(name_info.get('nickname', "")), to_ascii(name_info['first'])[0] + ".")
            if len(name_parts) >= 2 and \
               name_parts[1] not in first_names and \
               name_parts[1].split(" ")[0] not in first_names:
                continue
            break  # match
        else:
            # no match
            continue
        # This is a possible match.
        matches.append((moc, term))
    # Return if there is a unique match.
    if len(matches) == 0:
        logging.warning("Could not match name %s (%s-%s; %s) to any legislator." % (name, state, party, when))
        return None
    if len(matches) > 1:
        logging.warning("Multiple matches of name %s (%s-%s; %s) to legislators (excludes %s)." % (name, state, party, when, str(exclude)))
        return None
    return matches[0][0]['id'][id_requested]
def create_legislators_map(map_from, map_to, map_function, filename="legislators-current", legislators_map=None):
    """Create a map from one piece of legislators data to another.

    :param map_from: plain-text term for the source side, used in logging and
        the cache filename.
    :param map_to: plain-text term for the target side, likewise.
    :param map_function: function(map, item) -> map that folds one YAML item
        into the map (implementations mutate and return the map).
    :param filename: source legislators file stem. (Default: "legislators-current")
    :param legislators_map: base map to build on top of; primarily used to
        combine maps via create_combined_legislators_map(). (Default: a fresh {})
    """
    # A fresh dict per call: the old `legislators_map={}` default was mutated
    # in place by map_function and therefore shared across calls.
    if legislators_map is None:
        legislators_map = {}
    # Make sure we have the congress-legislators repo available.
    require_congress_legislators_repo()
    cache_filename = get_cache_filename("map-%s-%s-%s" % (map_from.lower().replace(" ", "_"), map_to.lower().replace(" ", "_"), filename))
    # Check if the cached pickle file is newer than the original YAML file.
    if check_cached_file("congress-legislators/%s.yaml" % (filename), cache_filename):
        # The pickle file is newer, so it's probably safe to use the cached map.
        logging.info("Using cached map from %s to %s for %s..." % (map_from, map_to, filename))
        legislators_map = pickle_load(cache_filename)
    else:
        # The YAML file is newer, so we have to generate a new map.
        logging.warning("Generating new map from %s to %s for %s..." % (map_from, map_to, filename))
        # Because we'll be caching the YAML file in a pickled file, create the
        # cache directory where that will be stored. makedirs also creates the
        # parent "cache" directory when missing, where os.mkdir would raise.
        if not os.path.exists("cache/congress-legislators"):
            os.makedirs("cache/congress-legislators")
        # Load the YAML file and fold every item through the map function.
        for item in yaml_load("congress-legislators/%s.yaml" % (filename)):
            legislators_map = map_function(legislators_map, item)
        # Save the new map to a new pickle file.
        pickle_write(legislators_map, cache_filename)
    return legislators_map
def create_combined_legislators_map(map_from, map_to, map_function, filenames=("executive", "legislators-historical", "legislators-current")):
    """Create a legislators map combining data from multiple legislators files.

    'map_from', 'map_to', and 'map_function' are passed straight through to
    create_legislators_map(). *filenames* lists the source file stems; the
    default is an immutable tuple (rather than a shared mutable list).
    """
    combined_map = {}
    for filename in filenames:
        combined_map = create_legislators_map(map_from, map_to, map_function, filename, combined_map)
    return combined_map
# Crosswalk between a person's many IDs: id_type -> id_value -> {other ids}.
person_id_map = {}

def generate_person_id_map():
    """Build the global person ID crosswalk from every legislators file."""
    def map_function(person_id_map, person):
        for source_id_type, source_id in person["id"].items():
            # Instantiate this ID type.
            bucket = person_id_map.setdefault(source_id_type, {})
            # Certain ID types carry multiple IDs; normalize to a list.
            source_ids = source_id if isinstance(source_id, list) else [source_id]
            for source_id in source_ids:
                # Instantiate this value for this ID type.
                entry = bucket.setdefault(source_id, {})
                # Map every *other* ID type/value of this person onto this ID.
                for target_id_type, target_id in person["id"].items():
                    if target_id_type != source_id_type:
                        entry[target_id_type] = target_id
        return person_id_map
    # Make the person ID map available in the global space.
    global person_id_map
    person_id_map = create_combined_legislators_map("person", "ID", map_function)
def get_person_id_map():
    """Return the person ID crosswalk, generating it lazily on first access."""
    global person_id_map
    if person_id_map:
        return person_id_map
    generate_person_id_map()
    return person_id_map
def get_person_id(source_id_type, source_id, target_id_type):
    """Translate a person's ID from one namespace to another.

    :param source_id_type: the ID type provided to identify the person.
    :param source_id: the provided ID of the aforementioned type.
    :param target_id_type: the desired ID type for the person.
    :raises KeyError: with a descriptive message at whichever lookup level fails.
    """
    id_map = get_person_id_map()
    try:
        ids_of_type = id_map[source_id_type]
    except KeyError:
        raise KeyError("'%s' is not a valid ID type." % (source_id_type))
    try:
        target_ids = ids_of_type[source_id]
    except KeyError:
        raise KeyError("'%s' is not a valid '%s' ID." % (source_id, source_id_type))
    try:
        return target_ids[target_id_type]
    except KeyError:
        raise KeyError("No corresponding '%s' ID for '%s' ID '%s'." % (target_id_type, source_id_type, source_id))
# Map from a person (bioguide ID) to the sorted Congresses they served during.
person_congresses_map = {}

def generate_person_congresses_map():
    """Build the global bioguide-ID -> [Congress, ...] map."""
    def map_function(person_congresses_map, person):
        try:
            bioguide_id = person["id"]["bioguide"]
        except KeyError:
            # Some records (e.g. executives) carry no bioguide ID; skip them.
            return person_congresses_map
        congresses = person_congresses_map.setdefault(bioguide_id, [])
        for term in person["terms"]:
            congresses.extend(get_term_congresses(term))
        congresses.sort()
        return person_congresses_map
    # Make the person congresses map available in the global space.
    global person_congresses_map
    person_congresses_map = create_combined_legislators_map("person", "Congresses", map_function)
def get_person_congresses_map():
    """Return the person-to-Congresses map, generating it lazily on first access."""
    global person_congresses_map
    if person_congresses_map:
        return person_congresses_map
    generate_person_congresses_map()
    return person_congresses_map
def get_person_congresses(person_id, person_id_type="bioguide"):
    """List the Congresses a person served during.

    :param person_id: the ID of the desired person.
    :param person_id_type: the namespace of *person_id*. (Default: "bioguide")
    :raises KeyError: when the person has no known Congresses.
    """
    if person_id_type == "bioguide":
        bioguide_id = person_id
    else:
        bioguide_id = get_person_id(person_id_type, person_id, "bioguide")
    congresses_by_person = get_person_congresses_map()
    if bioguide_id not in congresses_by_person:
        raise KeyError("No known Congresses for BioGuide ID '%s'." % (bioguide_id))
    return congresses_by_person[bioguide_id]
# Map from a Congress number to the set of bioguide IDs who served during it.
congress_persons_map = {}

def generate_congress_persons_map():
    """Build the global Congress -> {bioguide IDs} map."""
    def map_function(congress_persons_map, person):
        try:
            bioguide_id = person["id"]["bioguide"]
        except KeyError:
            # Records without a bioguide ID are skipped.
            return congress_persons_map
        for term in person["terms"]:
            for congress in get_term_congresses(term):
                congress_persons_map.setdefault(congress, set()).add(bioguide_id)
        return congress_persons_map
    # Make the Congress persons map available in the global space.
    global congress_persons_map
    congress_persons_map = create_combined_legislators_map("Congress", "persons", map_function)
def get_congress_persons_map():
    """Return the Congress-to-persons map, generating it lazily on first access."""
    global congress_persons_map
    if congress_persons_map:
        return congress_persons_map
    generate_congress_persons_map()
    return congress_persons_map
def get_congress_persons(congress):
    """Return the set of bioguide IDs of everyone who served during *congress*.

    :raises KeyError: when nothing is known about that Congress.
    """
    persons_by_congress = get_congress_persons_map()
    try:
        return persons_by_congress[congress]
    except KeyError:
        raise KeyError("No known persons for Congress '%s'." % (congress))
# XXX: Deprecated (the class name has a typo). Only used by get_govtrack_person_id().
class UnmatchedIdentifer(Exception):
    """Raised when an identifier cannot be mapped to a GovTrack ID."""
    def __init__(self, id_type, id_value, help_url):
        message = "%s=%s %s" % (id_type, str(id_value), help_url)
        super(UnmatchedIdentifer, self).__init__(message)
# XXX: Deprecated. Use get_person_id() instead.
def get_govtrack_person_id(source_id_type, source_id):
    """Wrapper around get_person_id() targeting the 'govtrack' namespace.

    Raises UnmatchedIdentifer (including a congress.gov hint URL for THOMAS
    IDs) when no mapping exists.
    """
    try:
        return get_person_id(source_id_type, source_id, "govtrack")
    except KeyError:
        see_also = ""
        if source_id_type == "thomas":
            # Suggest a URL on congress.gov to quickly look up who the ID corresponds to.
            # We store the IDs as strings with leading zeroes like on THOMAS, but in
            # Congress.gov URLs it must not be zero-padded.
            see_also = "http://www.congress.gov/member/xxx/%d" % int(source_id)
        logging.error("GovTrack ID not known for %s %s. (%s)" % (source_id_type, str(source_id), see_also))
        raise UnmatchedIdentifer(source_id_type, source_id, see_also)
| |
from yamtbx.dataproc.XIO import XIO
from collections import OrderedDict
# Spot-finding parameter presets keyed by (beamline, detector, binning, extra);
# each value is a phil-format parameter string. The key for a given image is
# chosen by get_key_by_img() below.
sp_params_strs = OrderedDict(((("BL32XU", "EIGER9M", None, None), """\
distl {
detector_tiles = 1
peripheral_margin = 0
minimum_spot_area = 2
minimum_signal_height = 4.
minimum_spot_height = None
}
xds {
strong_pixel = 4
minimum_number_of_pixels_in_a_spot = 3
background_pixel = None
}
cheetah {
ADCthresh = 5
MinSNR = 8
MinPixCount = 3
MaxPixCount = 40
LocalBGRadius = 2
MinPeakSeparation = 0
algorithm = 8
binning = 1
}
software_binning = False
"""),
# MX225HS presets differ only in margin/spot-area/signal-height per binning.
(("BL32XU", "MX225HS", "2x2", None), """\
distl {
detector_tiles = 3
peripheral_margin = 10
minimum_spot_area = 5
minimum_signal_height = 2.
minimum_spot_height = None
}
xds {
strong_pixel = 4
minimum_number_of_pixels_in_a_spot = 3
background_pixel = None
}
software_binning = False
"""),
(("BL32XU", "MX225HS", "4x4", None), """\
distl {
detector_tiles = 3
peripheral_margin = 10
minimum_spot_area = 2
minimum_signal_height = 4.
minimum_spot_height = None
}
xds {
strong_pixel = 4
minimum_number_of_pixels_in_a_spot = 3
background_pixel = None
}
software_binning = False
"""),
(("BL32XU", "MX225HS", "8x8", None), """\
distl {
detector_tiles = 3
peripheral_margin = 5
minimum_spot_area = 1
minimum_signal_height = 4.
minimum_spot_height = None
}
xds {
strong_pixel = 4
minimum_number_of_pixels_in_a_spot = 3
background_pixel = None
}
software_binning = False
"""),
# MX225HE presets, one per supported binning.
(("BL32XU", "MX225HE", "2x2", None), """\
distl {
detector_tiles = 3
peripheral_margin = 10
minimum_spot_area = 5
minimum_signal_height = 2.
minimum_spot_height = None
}
xds {
strong_pixel = 4
minimum_number_of_pixels_in_a_spot = 3
background_pixel = None
}
software_binning = False
"""),
(("BL32XU", "MX225HE", "3x3", None), """\
distl {
detector_tiles = 3
peripheral_margin = 10
minimum_spot_area = 4
minimum_signal_height = 3.
minimum_spot_height = None
}
xds {
strong_pixel = 4
minimum_number_of_pixels_in_a_spot = 3
background_pixel = None
}
software_binning = False
"""),
(("BL32XU", "MX225HE", "4x4", None), """\
distl {
detector_tiles = 3
peripheral_margin = 10
minimum_spot_area = 2
minimum_signal_height = 4.
minimum_spot_height = None
}
xds {
strong_pixel = 4
minimum_number_of_pixels_in_a_spot = 3
background_pixel = None
}
software_binning = False
"""),
(("BL32XU", "MX225HE", "8x8", None), """\
distl {
detector_tiles = 3
peripheral_margin = 5
minimum_spot_area = 1
minimum_signal_height = 4.
minimum_spot_height = None
}
xds {
strong_pixel = 4
minimum_number_of_pixels_in_a_spot = 3
background_pixel = None
}
software_binning = False
"""),
(("BL32XU", "MX225HE", "16x16", None), """\
distl {
detector_tiles = 3
peripheral_margin = 5
minimum_spot_area = 1
minimum_signal_height = 4.
minimum_spot_height = None
}
xds {
strong_pixel = 4
minimum_number_of_pixels_in_a_spot = 3
background_pixel = None
}
software_binning = False
"""),
(("BL32XU", "Q315r", "2x2", None), """\
distl {
detector_tiles = 3
peripheral_margin = 10
minimum_spot_area = 5
minimum_signal_height = 2.
minimum_spot_height = None
}
xds {
strong_pixel = 4
minimum_number_of_pixels_in_a_spot = 3
background_pixel = None
}
software_binning = False
"""),
(("BL32XU", "CMOS", "1x1", None), """\
distl {
detector_tiles = 1
peripheral_margin = 0
minimum_spot_area = 3
minimum_signal_height = 2.
minimum_spot_height = None
}
xds {
strong_pixel = 4
minimum_number_of_pixels_in_a_spot = 3
background_pixel = None
}
software_binning = False
"""),
(("BL41XU", "PILATUS3 6M", None, None), """\
distl {
detector_tiles = None
peripheral_margin = 10
minimum_spot_area = 2
minimum_signal_height = 4
minimum_spot_height = None
}
xds {
strong_pixel = 4
minimum_number_of_pixels_in_a_spot = 3
background_pixel = None
}
software_binning = False
""")
))
def get_common_params_str(use_cuda=False, env="oys"):
    """Return the spot-finder parameter string shared by every detector preset.

    :param use_cuda: when True, enables the cuda_median_background block.
    :param env: site environment name (currently unused; kept for callers).
    """
    #if nproc is None: nproc = get_number_of_processors(default=4)
    #if env == "ppu": nproc //= 2
    dic = {"use_cuda": "True" if use_cuda else "False"}
    return """\
engine = *distl xds
distl {
res {
outer = 5.
inner = 30.
}
scanbox_windows = 101 51 51
}
xds {
do_defpix = True
value_range_for_trusted_detector_pixels = 9000. 30000
}
cuda_median_background {
active = %(use_cuda)s
filter_radius = 10
filter_repeat = 1
}
#bkg_image = /home/yam/work/smoothing_131114/xds_process_scan/BKGINIT.cbf
#gain_image = /home/yam/work/smoothing_131114/xds_process_scan/GAIN.cbf
#bkg_image = /home/yam/work/smoothing_131114/my_scan172/honki/bkginit_20_20_1.cbf
#gain_image = /home/yam/work/smoothing_131114/my_scan172/honki/test_rev_median5.cbf
#gain_image_nbxy = 3,3
""" % dic
def get_key_by_img(imgfile):
    """Identify the sp_params_strs key for an image file.

    Reads the image header via XIO and dispatches on the image type, detector
    serial number, and frame dimensions to the matching
    (beamline, detector, binning, extra) tuple.
    """
    header = XIO.Image(imgfile).header
    height, width = header["Height"], header["Width"]
    if header["ImageType"] == "marccd":
        if header["SerialNumber"] in ("106", None):  # None for 2013B
            # MX225HS: frame size encodes the binning.
            hs_binnings = {1440: "4x4", 2880: "2x2", 720: "8x8"}
            if height == width and height in hs_binnings:
                return ("BL32XU", "MX225HS", hs_binnings[height], None)
        if header["SerialNumber"] == "31":
            # MX225HE: frame size encodes the binning.
            he_binnings = {384: "16x16", 768: "8x8", 1536: "4x4", 2046: "3x3", 3072: "2x2"}
            if height == width and height in he_binnings:
                return ("BL32XU", "MX225HE", he_binnings[height], None)
    elif header["ImageType"] == "adsc":
        if height == width == 2352 and int(header["PixelX"] * 1000) == 50:
            return ("BL32XU", "CMOS", "1x1", None)  # This may be used at BL26B2.
        if header["SerialNumber"] == "915":
            if height == width == 3072:
                return ("BL32XU", "Q315r", "2x2", None)
        elif header["SerialNumber"] == "PILATUS3 6M, S/N 60-0125":
            return ("BL41XU", "PILATUS3 6M", None, None)
    raise Exception("We do not know such a detector")
# get_key_by_img()
| |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import abc
import logging
import warnings
import xml.etree.ElementTree as ET
from collections import OrderedDict
import re
import csv
import tempfile
import luigi
from luigi import Task
logger = logging.getLogger('luigi-interface')
try:
import requests
except ImportError:
logger.warning("This module requires the python package 'requests'.")
try:
from urlparse import urlsplit
except ImportError:
from urllib.parse import urlsplit
def get_soql_fields(soql):
    """
    Return the list of column names queried by a SOQL statement.

    Extracts the text between SELECT and FROM (case-insensitively, across
    lines) and splits it on commas and newlines, discarding whitespace and
    empty entries.
    """
    # DOTALL so multi-line field lists match; flags are passed explicitly
    # because a mid-pattern global flag like '(?s)' is an error on Python 3.11+.
    soql_fields = re.search('(?<=select)(.*)(?=from)', soql, re.IGNORECASE | re.DOTALL)
    soql_fields = re.sub(' ', '', soql_fields.group())  # remove extra spaces
    soql_fields = re.sub('\t', '', soql_fields)  # remove tabs
    # NOTE: the original pattern ',|\n|\r|' carried an empty trailing
    # alternative, which on Python 3.7+ splits between every character.
    fields = re.split(',|\n|\r', soql_fields)  # split on commas and newlines
    return [field for field in fields if field != '']  # remove empty strings
def ensure_utf(value):
    """Encode unicode text as UTF-8 bytes; pass any other value through unchanged.

    (Python 2 helper: `unicode` does not exist on Python 3.)
    """
    if isinstance(value, unicode):
        return value.encode("utf-8")
    return value
def parse_results(fields, data):
    """
    Flatten Salesforce query 'records' into rows aligned with *fields*.

    Scalar values are placed at the index of their field name; nested objects
    are walked recursively by _traverse_results() using dotted field paths.
    Lists/tuples and the 'attributes' metadata object are ignored.
    """
    master = []
    for record in data['records']:  # for each 'record' in response
        # Null-filled row, one slot per requested column.
        row = [None] * len(fields)
        for obj, value in record.iteritems():
            if isinstance(value, dict) and obj != 'attributes':
                # Nested object: descend, tracking the dotted path.
                _traverse_results(value, fields, row, obj)
            elif not isinstance(value, (dict, list, tuple)):
                if obj in fields:
                    row[fields.index(obj)] = ensure_utf(value)
        master.append(row)
    return master
def _traverse_results(value, fields, row, path):
    """
    Recursive helper for parse_results().

    Walks a nested record object, writing any requested leaf values into *row*
    under their dotted field names (e.g. "Owner.Name").
    """
    for key, item in value.iteritems():
        dotted = '{path}.{name}'.format(path=path, name=key) if path else key
        if isinstance(item, dict) and key != 'attributes':
            # Descend another level, extending the dotted path.
            _traverse_results(item, fields, row, dotted)
        elif not isinstance(item, (dict, list, tuple)):
            if dotted in fields:
                row[fields.index(dotted)] = ensure_utf(item)
class salesforce(luigi.Config):
    """
    Config system to get config vars from 'salesforce' section in configuration file.
    Did not include sandbox_name here, as the user may have multiple sandboxes.
    """
    # Production-org credentials (all default to empty strings).
    username = luigi.Parameter(default='')
    password = luigi.Parameter(default='')
    security_token = luigi.Parameter(default='')
    # sandbox token
    sb_security_token = luigi.Parameter(default='')
class QuerySalesforce(Task):
    """Luigi task that runs a SOQL query through the Salesforce Bulk API and
    writes the (possibly merged) result to self.output()."""

    @abc.abstractproperty
    def object_name(self):
        """
        Override to return the SF object we are querying.
        Must have the SF "__c" suffix if it is a customer object.
        """
        return None

    @property
    def use_sandbox(self):
        """
        Override to specify use of SF sandbox.
        True iff we should be uploading to a sandbox environment instead of the production organization.
        """
        return False

    @property
    def sandbox_name(self):
        """Override to specify the sandbox name if it is intended to be used."""
        return None

    @abc.abstractproperty
    def soql(self):
        """Override to return the raw string SOQL or the path to it."""
        return None

    @property
    def is_soql_file(self):
        """Override to True if soql property is a file path."""
        return False

    @property
    def content_type(self):
        """
        Override to use a different content type. Salesforce allows XML, CSV, ZIP_CSV, or ZIP_XML. Defaults to CSV.
        """
        return "CSV"

    def run(self):
        """Create a bulk query job and batch, wait for it, and download results.

        When the bulk batch fails because the query contains a foreign-key
        relationship (unsupported by the Bulk API), falls back to the normal
        REST API query after the bulk job is closed.
        """
        if self.use_sandbox and not self.sandbox_name:
            raise Exception("Parameter sf_sandbox_name must be provided when uploading to a Salesforce Sandbox")

        sf = SalesforceAPI(salesforce().username,
                           salesforce().password,
                           salesforce().security_token,
                           salesforce().sb_security_token,
                           self.sandbox_name)

        job_id = sf.create_operation_job('query', self.object_name, content_type=self.content_type)
        logger.info("Started query job %s in salesforce for object %s" % (job_id, self.object_name))

        batch_id = ''
        msg = ''
        try:
            if self.is_soql_file:
                # Replace the path with the file's contents for the rest of the run.
                with open(self.soql, 'r') as infile:
                    self.soql = infile.read()

            batch_id = sf.create_batch(job_id, self.soql, self.content_type)
            logger.info("Creating new batch %s to query: %s for job: %s." % (batch_id, self.object_name, job_id))
            status = sf.block_on_batch(job_id, batch_id)
            if status['state'].lower() == 'failed':
                msg = "Batch failed with message: %s" % status['state_message']
                logger.error(msg)
                # don't raise exception if it's b/c of an included relationship
                # normal query will execute (with relationship) after bulk job is closed
                if 'foreign key relationships not supported' not in status['state_message'].lower():
                    raise Exception(msg)
            else:
                result_ids = sf.get_batch_result_ids(job_id, batch_id)

                # If there's only one result, just download it, otherwise we need to merge the resulting downloads
                if len(result_ids) == 1:
                    data = sf.get_batch_result(job_id, batch_id, result_ids[0])
                    with open(self.output().path, 'w') as outfile:
                        outfile.write(data)
                else:
                    # Download each file to disk, and then merge into one.
                    # Preferring to do it this way so as to minimize memory consumption.
                    for i, result_id in enumerate(result_ids):
                        logger.info("Downloading batch result %s for batch: %s and job: %s" % (result_id, batch_id, job_id))
                        with open("%s.%d" % (self.output().path, i), 'w') as outfile:
                            outfile.write(sf.get_batch_result(job_id, batch_id, result_id))

                    logger.info("Merging results of batch %s" % batch_id)
                    self.merge_batch_results(result_ids)
        finally:
            logger.info("Closing job %s" % job_id)
            sf.close_job(job_id)

        # NOTE(review): if create_batch()/the file read raised above, `status`
        # is unbound here and this line raises NameError -- confirm intended.
        if 'state_message' in status and 'foreign key relationships not supported' in status['state_message'].lower():
            logger.info("Retrying with REST API query")
            data_file = sf.query_all(self.soql)

            reader = csv.reader(data_file)
            with open(self.output().path, 'w') as outfile:
                writer = csv.writer(outfile, dialect='excel')
                for row in reader:
                    writer.writerow(row)

    def merge_batch_results(self, result_ids):
        """
        Merges the resulting files of a multi-result batch bulk query.
        """
        # NOTE(review): outfile is not closed on the exception path; a `with`
        # block would be safer. Left unchanged here.
        outfile = open(self.output().path, 'w')

        if self.content_type.lower() == 'csv':
            for i, result_id in enumerate(result_ids):
                with open("%s.%d" % (self.output().path, i), 'r') as f:
                    header = f.readline()
                    # Keep the CSV header only from the first part file.
                    if i == 0:
                        outfile.write(header)
                    for line in f:
                        outfile.write(line)
        else:
            raise Exception("Batch result merging not implemented for %s" % self.content_type)

        outfile.close()
class SalesforceAPI(object):
"""
Class used to interact with the SalesforceAPI. Currently provides only the
methods necessary for performing a bulk upload operation.
"""
API_VERSION = 34.0
SOAP_NS = "{urn:partner.soap.sforce.com}"
API_NS = "{http://www.force.com/2009/06/asyncapi/dataload}"
def __init__(self, username, password, security_token, sb_token=None, sandbox_name=None):
self.username = username
self.password = password
self.security_token = security_token
self.sb_security_token = sb_token
self.sandbox_name = sandbox_name
if self.sandbox_name:
self.username += ".%s" % self.sandbox_name
self.session_id = None
self.server_url = None
self.hostname = None
def start_session(self):
"""
Starts a Salesforce session and determines which SF instance to use for future requests.
"""
if self.has_active_session():
raise Exception("Session already in progress.")
response = requests.post(self._get_login_url(),
headers=self._get_login_headers(),
data=self._get_login_xml())
response.raise_for_status()
root = ET.fromstring(response.text)
for e in root.iter("%ssessionId" % self.SOAP_NS):
if self.session_id:
raise Exception("Invalid login attempt. Multiple session ids found.")
self.session_id = e.text
for e in root.iter("%sserverUrl" % self.SOAP_NS):
if self.server_url:
raise Exception("Invalid login attempt. Multiple server urls found.")
self.server_url = e.text
if not self.has_active_session():
raise Exception("Invalid login attempt resulted in null sessionId [%s] and/or serverUrl [%s]." %
(self.session_id, self.server_url))
self.hostname = urlsplit(self.server_url).hostname
    def has_active_session(self):
        # Truthy only after start_session() has set both values. Note this
        # returns the raw `and` result (a string or None), not a bool.
        return self.session_id and self.server_url
def query(self, query, **kwargs):
"""
Return the result of a Salesforce SOQL query as a dict decoded from the Salesforce response JSON payload.
:param query: the SOQL query to send to Salesforce, e.g. "SELECT id from Lead WHERE email = 'a@b.com'"
"""
params = {'q': query}
response = requests.get(self._get_norm_query_url(),
headers=self._get_rest_headers(),
params=params,
**kwargs)
if response.status_code != requests.codes.ok:
raise Exception(response.content)
return response.json()
def query_more(self, next_records_identifier, identifier_is_url=False, **kwargs):
"""
Retrieves more results from a query that returned more results
than the batch maximum. Returns a dict decoded from the Salesforce
response JSON payload.
:param next_records_identifier: either the Id of the next Salesforce
object in the result, or a URL to the
next record in the result.
:param identifier_is_url: True if `next_records_identifier` should be
treated as a URL, False if
`next_records_identifer` should be treated as
an Id.
"""
if identifier_is_url:
# Don't use `self.base_url` here because the full URI is provided
url = (u'https://{instance}{next_record_url}'
.format(instance=self.hostname,
next_record_url=next_records_identifier))
else:
url = self._get_norm_query_url() + '{next_record_id}'
url = url.format(next_record_id=next_records_identifier)
response = requests.get(url, headers=self._get_rest_headers(), **kwargs)
response.raise_for_status()
return response.json()
    def query_all(self, query, **kwargs):
        """
        Returns the full set of results for the `query`. This is a
        convenience wrapper around `query(...)` and `query_more(...)`.

        Returns a temporary *file object*, rewound to the start, containing
        the results as CSV: a header row of the queried field names followed
        by one row per retrieved record, with all pages merged.

        :param query: the SOQL query to send to Salesforce, e.g.
                      `SELECT Id FROM Lead WHERE Email = "waldo@somewhere.com"`
        """
        # Make the initial query to Salesforce
        response = self.query(query, **kwargs)
        # get fields
        fields = get_soql_fields(query)
        # put fields and first page of results into a temp list to be written to TempFile
        tmp_list = [fields]
        tmp_list.extend(parse_results(fields, response))
        tmp_dir = luigi.configuration.get_config().get('salesforce', 'local-tmp-dir', None)
        # NOTE(review): binary append mode 'a+b' with csv.writer works on
        # Python 2 but would fail on Python 3 (csv needs text mode there).
        tmp_file = tempfile.TemporaryFile(mode='a+b', dir=tmp_dir)
        writer = csv.writer(tmp_file)
        writer.writerows(tmp_list)
        # The number of results might have exceeded the Salesforce batch limit
        # so check whether there are more results and retrieve them if so.
        length = len(response['records'])
        while not response['done']:
            response = self.query_more(response['nextRecordsUrl'], identifier_is_url=True, **kwargs)
            writer.writerows(parse_results(fields, response))
            length += len(response['records'])
            # Progress log every 10000 records.
            if not length % 10000:
                logger.info('Requested {0} lines...'.format(length))
        logger.info('Requested a total of {0} lines.'.format(length))
        # Rewind so the caller can read the CSV from the beginning.
        tmp_file.seek(0)
        return tmp_file
# Generic Rest Function
def restful(self, path, params):
"""
Allows you to make a direct REST call if you know the path
Arguments:
:param path: The path of the request. Example: sobjects/User/ABC123/password'
:param params: dict of parameters to pass to the path
"""
url = self._get_norm_base_url() + path
response = requests.get(url, headers=self._get_rest_headers(), params=params)
if response.status_code != 200:
raise Exception(response)
json_result = response.json(object_pairs_hook=OrderedDict)
if len(json_result) == 0:
return None
else:
return json_result
def create_operation_job(self, operation, obj, external_id_field_name=None, content_type=None):
"""
Creates a new SF job that for doing any operation (insert, upsert, update, delete, query)
:param operation: delete, insert, query, upsert, update, hardDelete. Must be lowercase.
:param obj: Parent SF object
:param external_id_field_name: Optional.
"""
if not self.has_active_session():
self.start_session()
response = requests.post(self._get_create_job_url(),
headers=self._get_create_job_headers(),
data=self._get_create_job_xml(operation, obj, external_id_field_name, content_type))
response.raise_for_status()
root = ET.fromstring(response.text)
job_id = root.find('%sid' % self.API_NS).text
return job_id
def get_job_details(self, job_id):
    """
    Gets all details for existing job

    :param job_id: job_id as returned by 'create_operation_job(...)'
    :return: job info as xml
    """
    resp = requests.get(self._get_job_details_url(job_id))
    resp.raise_for_status()
    return resp
def abort_job(self, job_id):
    """
    Abort an existing job. When a job is aborted, no more records are processed.
    Changes to data may already have been committed and aren't rolled back.

    :param job_id: job_id as returned by 'create_operation_job(...)'
    :return: abort response as xml
    """
    resp = requests.post(
        self._get_abort_job_url(job_id),
        headers=self._get_abort_job_headers(),
        data=self._get_abort_job_xml(),
    )
    resp.raise_for_status()
    return resp
def close_job(self, job_id):
    """
    Closes job

    :param job_id: job_id as returned by 'create_operation_job(...)'
    :return: close response as xml
    """
    if not job_id or not self.has_active_session():
        raise Exception("Can not close job without valid job_id and an active session.")

    resp = requests.post(
        self._get_close_job_url(job_id),
        headers=self._get_close_job_headers(),
        data=self._get_close_job_xml(),
    )
    resp.raise_for_status()
    return resp
def create_batch(self, job_id, data, file_type):
    """
    Creates a batch with either a string of data or a file containing data.

    If a file is provided, this will pull the contents of the file_target into memory when running.
    That shouldn't be a problem for any files that meet the Salesforce single batch upload
    size limit (10MB) and is done to ensure compressed files can be uploaded properly.

    :param job_id: job_id as returned by 'create_operation_job(...)'
    :param data: string or bytes payload for the batch
    :param file_type: 'csv' or anything else for XML; selects the Content-Type header
    :return: Returns batch_id
    """
    if not job_id or not self.has_active_session():
        raise Exception("Can not create a batch without a valid job_id and an active session.")

    headers = self._get_create_batch_content_headers(file_type)
    # Fix: HTTP header values must be strings; passing the raw int made
    # requests reject (or mis-serialize) the Content-Length header.
    headers['Content-Length'] = str(len(data))

    response = requests.post(self._get_create_batch_url(job_id),
                             headers=headers,
                             data=data)
    response.raise_for_status()

    root = ET.fromstring(response.text)
    batch_id = root.find('%sid' % self.API_NS).text
    return batch_id
def block_on_batch(self, job_id, batch_id, sleep_time_seconds=5, max_wait_time_seconds=-1):
    """
    Blocks until @batch_id is completed or failed.

    :param job_id:
    :param batch_id:
    :param sleep_time_seconds: polling interval between status checks
    :param max_wait_time_seconds: overall wait limit; negative means wait forever
    """
    if not job_id or not batch_id or not self.has_active_session():
        raise Exception("Can not block on a batch without a valid batch_id, job_id and an active session.")

    start_time = time.time()
    status = {}
    while max_wait_time_seconds < 0 or time.time() - start_time < max_wait_time_seconds:
        status = self._get_batch_info(job_id, batch_id)
        logger.info("Batch %s Job %s in state %s. %s records processed. %s records failed." %
                    (batch_id, job_id, status['state'], status['num_processed'], status['num_failed']))
        if status['state'].lower() in ["completed", "failed"]:
            return status
        time.sleep(sleep_time_seconds)

    # Bug fix: the timeout message previously reported sleep_time_seconds
    # (the poll interval) instead of the actual wait limit that expired.
    raise Exception("Batch did not complete in %s seconds. Final status was: %s" % (max_wait_time_seconds, status))
def get_batch_results(self, job_id, batch_id):
    """
    DEPRECATED: Use `get_batch_result_ids`
    """
    warnings.warn("get_batch_results is deprecated and only returns one batch result. Please use get_batch_result_ids")
    result_ids = self.get_batch_result_ids(job_id, batch_id)
    # Preserve the historical behavior of returning only the first id.
    return result_ids[0]
def get_batch_result_ids(self, job_id, batch_id):
    """
    Get result IDs of a batch that has completed processing.

    :param job_id: job_id as returned by 'create_operation_job(...)'
    :param batch_id: batch_id as returned by 'create_batch(...)'
    :return: list of batch result IDs to be used in 'get_batch_result(...)'
    """
    resp = requests.get(self._get_batch_results_url(job_id, batch_id),
                        headers=self._get_batch_info_headers())
    resp.raise_for_status()

    root = ET.fromstring(resp.text)
    return [node.text for node in root.findall('%sresult' % self.API_NS)]
def get_batch_result(self, job_id, batch_id, result_id):
    """
    Gets result back from Salesforce as whatever type was originally sent in create_batch (xml, or csv).

    :param job_id:
    :param batch_id:
    :param result_id:
    """
    resp = requests.get(self._get_batch_result_url(job_id, batch_id, result_id),
                        headers=self._get_session_headers())
    resp.raise_for_status()
    # Return raw bytes so CSV/XML payloads pass through untouched.
    return resp.content
def _get_batch_info(self, job_id, batch_id):
    """Fetch and parse the batchInfo XML for one batch of a bulk job."""
    resp = requests.get(self._get_batch_info_url(job_id, batch_id),
                        headers=self._get_batch_info_headers())
    resp.raise_for_status()

    root = ET.fromstring(resp.text)
    info = {
        "state": root.find('%sstate' % self.API_NS).text,
        "num_processed": root.find('%snumberRecordsProcessed' % self.API_NS).text,
        "num_failed": root.find('%snumberRecordsFailed' % self.API_NS).text,
    }
    # stateMessage is only present on some states (e.g. failures).
    state_message = root.find('%sstateMessage' % self.API_NS)
    if state_message is not None:
        info['state_message'] = state_message.text
    return info
def _get_login_url(self):
    # SOAP login endpoint. Sandboxes authenticate against test.salesforce.com,
    # production against login.salesforce.com.
    server = "login" if not self.sandbox_name else "test"
    return "https://%s.salesforce.com/services/Soap/u/%s" % (server, self.API_VERSION)

def _get_base_url(self):
    # Root URL for all services on the instance host obtained at login.
    return "https://%s/services" % self.hostname

def _get_bulk_base_url(self):
    # Expands on Base Url for Bulk (async) API requests.
    return "%s/async/%s" % (self._get_base_url(), self.API_VERSION)

def _get_norm_base_url(self):
    # Expands on Base Url for Norm (regular REST) API requests.
    return "%s/data/v%s" % (self._get_base_url(), self.API_VERSION)

def _get_norm_query_url(self):
    # Expands on Norm Base Url; endpoint for SOQL queries.
    return "%s/query" % self._get_norm_base_url()

def _get_create_job_url(self):
    # Expands on Bulk url; POST here creates a job.
    return "%s/job" % (self._get_bulk_base_url())

def _get_job_id_url(self, job_id):
    # Expands on Job Creation url; addresses one specific job.
    return "%s/%s" % (self._get_create_job_url(), job_id)

def _get_job_details_url(self, job_id):
    # Expands on basic Job Id url (GET for details).
    return self._get_job_id_url(job_id)

def _get_abort_job_url(self, job_id):
    # Expands on basic Job Id url (POST abort XML here).
    return self._get_job_id_url(job_id)

def _get_close_job_url(self, job_id):
    # Expands on basic Job Id url (POST close XML here).
    return self._get_job_id_url(job_id)

def _get_create_batch_url(self, job_id):
    # Expands on basic Job Id url; POST here adds a batch to the job.
    return "%s/batch" % (self._get_job_id_url(job_id))

def _get_batch_info_url(self, job_id, batch_id):
    # Expands on Batch Creation url; addresses one specific batch.
    return "%s/%s" % (self._get_create_batch_url(job_id), batch_id)

def _get_batch_results_url(self, job_id, batch_id):
    # Expands on Batch Info url; GET returns the list of result ids.
    return "%s/result" % (self._get_batch_info_url(job_id, batch_id))

def _get_batch_result_url(self, job_id, batch_id, result_id):
    # Expands on Batch Results url; GET returns one result payload.
    return "%s/%s" % (self._get_batch_results_url(job_id, batch_id), result_id)
def _get_login_headers(self):
    # Headers for the SOAP login call; the API requires a SOAPAction header.
    headers = {
        'Content-Type': "text/xml; charset=UTF-8",
        'SOAPAction': 'login'
    }
    return headers

def _get_session_headers(self):
    # Bulk API requests authenticate with the session id from the SOAP login.
    headers = {
        'X-SFDC-Session': self.session_id
    }
    return headers

def _get_norm_session_headers(self):
    # The regular REST API authenticates with a Bearer token instead.
    headers = {
        'Authorization': 'Bearer %s' % self.session_id
    }
    return headers

def _get_rest_headers(self):
    # REST calls send and expect JSON payloads.
    headers = self._get_norm_session_headers()
    headers['Content-Type'] = 'application/json'
    return headers

def _get_job_headers(self):
    # Bulk job management endpoints take XML payloads.
    headers = self._get_session_headers()
    headers['Content-Type'] = "application/xml; charset=UTF-8"
    return headers

def _get_create_job_headers(self):
    return self._get_job_headers()

def _get_abort_job_headers(self):
    return self._get_job_headers()

def _get_close_job_headers(self):
    return self._get_job_headers()

def _get_create_batch_content_headers(self, content_type):
    # Batch payloads may be CSV or XML; any value other than 'csv'
    # (case-insensitive) is treated as XML.
    headers = self._get_session_headers()
    content_type = 'text/csv' if content_type.lower() == 'csv' else 'application/xml'
    headers['Content-Type'] = "%s; charset=UTF-8" % content_type
    return headers

def _get_batch_info_headers(self):
    return self._get_session_headers()
def _get_login_xml(self):
    # SOAP login request body. The security token is appended to the
    # password; sandboxes use the separate sb_security_token.
    return """<?xml version="1.0" encoding="utf-8" ?>
<env:Envelope xmlns:xsd="http://www.w3.org/2001/XMLSchema"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xmlns:env="http://schemas.xmlsoap.org/soap/envelope/">
  <env:Body>
    <n1:login xmlns:n1="urn:partner.soap.sforce.com">
      <n1:username>%s</n1:username>
      <n1:password>%s%s</n1:password>
    </n1:login>
  </env:Body>
</env:Envelope>
""" % (self.username, self.password, self.security_token if self.sandbox_name is None else self.sb_security_token)

def _get_create_job_xml(self, operation, obj, external_id_field_name, content_type):
    # externalIdFieldName is only emitted when provided (used for upserts).
    external_id_field_name_element = "" if not external_id_field_name else \
        "\n<externalIdFieldName>%s</externalIdFieldName>" % external_id_field_name
    # Note: "Unable to parse job" error may be caused by reordering fields.
    # ExternalIdFieldName element must be before contentType element.
    return """<?xml version="1.0" encoding="UTF-8"?>
<jobInfo xmlns="http://www.force.com/2009/06/asyncapi/dataload">
  <operation>%s</operation>
  <object>%s</object>
  %s
  <contentType>%s</contentType>
</jobInfo>
""" % (operation, obj, external_id_field_name_element, content_type)

def _get_abort_job_xml(self):
    # Posting state=Aborted to the job URL aborts the job.
    return """<?xml version="1.0" encoding="UTF-8"?>
<jobInfo xmlns="http://www.force.com/2009/06/asyncapi/dataload">
  <state>Aborted</state>
</jobInfo>
"""

def _get_close_job_xml(self):
    # Posting state=Closed to the job URL closes the job.
    return """<?xml version="1.0" encoding="UTF-8"?>
<jobInfo xmlns="http://www.force.com/2009/06/asyncapi/dataload">
  <state>Closed</state>
</jobInfo>
"""
| |
"""
BaseHTTPServer that implements the Python WSGI protocol (PEP 333, rev 1.21).
Adapted from wsgiref.simple_server: http://svn.eby-sarna.com/wsgiref/
This is a simple server for use in testing or debugging Django apps. It hasn't
been reviewed for security issues. Don't use it for production use.
"""
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import mimetypes
import os
import re
import socket
import stat
import sys
import urllib
from django.core.management.color import color_style
from django.utils.http import http_date
from django.utils._os import safe_join
# Version of this bundled WSGI server implementation.
__version__ = "0.1"
# Public API of this module.
__all__ = ['WSGIServer','WSGIRequestHandler']

server_version = "WSGIServer/" + __version__
sys_version = "Python/" + sys.version.split()[0]
# Combined value advertised in the HTTP "Server:" response header.
software_version = server_version + ' ' + sys_version
class WSGIServerException(Exception):
    """Wrapper exception for errors raised while starting the WSGI server
    (see WSGIServer.server_bind)."""
class FileWrapper(object):
"""Wrapper to convert file-like objects to iterables"""
def __init__(self, filelike, blksize=8192):
self.filelike = filelike
self.blksize = blksize
if hasattr(filelike,'close'):
self.close = filelike.close
def __getitem__(self,key):
data = self.filelike.read(self.blksize)
if data:
return data
raise IndexError
def __iter__(self):
return self
def next(self):
data = self.filelike.read(self.blksize)
if data:
return data
raise StopIteration
# Regular expression that matches `special' characters in parameters, the
# existence of which force quoting of the parameter value.
# Used by _formatparam() below when quote is false.
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
def _formatparam(param, value=None, quote=1):
"""Convenience function to format and return a key=value pair.
This will quote the value if needed or if quote is true.
"""
if value is not None and len(value) > 0:
if quote or tspecials.search(value):
value = value.replace('\\', '\\\\').replace('"', r'\"')
return '%s="%s"' % (param, value)
else:
return '%s=%s' % (param, value)
else:
return param
class Headers(object):
    """Manage a collection of HTTP response headers.

    Header names compare case-insensitively; insertion order (including
    duplicates) is preserved in the backing list.
    """

    def __init__(self, headers):
        if not isinstance(headers, list):
            raise TypeError("Headers must be a list of name/value tuples")
        self._headers = headers

    def __len__(self):
        """Return the total number of headers, including duplicates."""
        return len(self._headers)

    def __setitem__(self, name, val):
        """Set the value of a header, replacing any existing occurrences."""
        del self[name]
        self._headers.append((name, val))

    def __delitem__(self, name):
        """Delete all occurrences of a header, if present.

        Does *not* raise an exception if the header is missing.
        """
        name = name.lower()
        # Fix: `<>` is Python 2-only syntax (removed in Python 3); `!=` is
        # equivalent in every Python version.
        self._headers[:] = [kv for kv in self._headers if kv[0].lower() != name]

    def __getitem__(self, name):
        """Get the first header value for 'name'.

        Return None if the header is missing instead of raising an exception.

        Note that if the header appears multiple times, exactly which
        occurrence gets returned is undefined.  Use get_all() to get all
        the values matching a header field name.
        """
        return self.get(name)

    def has_key(self, name):
        """Return true if the message contains the header."""
        return self.get(name) is not None

    __contains__ = has_key

    def get_all(self, name):
        """Return a list of all the values for the named field.

        These will be sorted in the order they appeared in the original header
        list or were added to this instance, and may contain duplicates.  Any
        fields deleted and re-inserted are always appended to the header list.
        If no fields exist with the given name, returns an empty list.
        """
        name = name.lower()
        return [kv[1] for kv in self._headers if kv[0].lower() == name]

    def get(self, name, default=None):
        """Get the first header value for 'name', or return 'default'."""
        name = name.lower()
        for k, v in self._headers:
            if k.lower() == name:
                return v
        return default

    def keys(self):
        """Return a list of all the header field names.

        These will be sorted in the order they appeared in the original header
        list, or were added to this instance, and may contain duplicates.
        Any fields deleted and re-inserted are always appended to the header
        list.
        """
        return [k for k, v in self._headers]

    def values(self):
        """Return a list of all header values.

        These will be sorted in the order they appeared in the original header
        list, or were added to this instance, and may contain duplicates.
        Any fields deleted and re-inserted are always appended to the header
        list.
        """
        return [v for k, v in self._headers]

    def items(self):
        """Get all the header fields and values as a list of tuples.

        These will be sorted in the order they were in the original header
        list, or were added to this instance, and may contain duplicates.
        Any fields deleted and re-inserted are always appended to the header
        list.
        """
        return self._headers[:]

    def __repr__(self):
        # Fix: backquote repr syntax was removed in Python 3; repr() is the
        # portable equivalent.
        return "Headers(%s)" % repr(self._headers)

    def __str__(self):
        """str() returns the formatted headers, complete with end line,
        suitable for direct HTTP transmission."""
        return '\r\n'.join(["%s: %s" % kv for kv in self._headers] + ['', ''])

    def setdefault(self, name, value):
        """Return first matching header value for 'name', or 'value'.

        If there is no header named 'name', add a new header with name 'name'
        and value 'value'."""
        result = self.get(name)
        if result is None:
            self._headers.append((name, value))
            return value
        else:
            return result

    def add_header(self, _name, _value, **_params):
        """Extended header setting.

        _name is the header field to add.  keyword arguments can be used to set
        additional parameters for the header field, with underscores converted
        to dashes.  Normally the parameter will be added as key="value" unless
        value is None, in which case only the key will be added.

        Example:

        h.add_header('content-disposition', 'attachment', filename='bud.gif')

        Note that unlike the corresponding 'email.Message' method, this does
        *not* handle '(charset, language, value)' tuples: all values must be
        strings or None.
        """
        parts = []
        if _value is not None:
            parts.append(_value)
        for k, v in _params.items():
            if v is None:
                parts.append(k.replace('_', '-'))
            else:
                parts.append(_formatparam(k.replace('_', '-'), v))
        self._headers.append((_name, "; ".join(parts)))
def guess_scheme(environ):
    """Return a guess for whether 'wsgi.url_scheme' should be 'http' or 'https'
    """
    return 'https' if environ.get("HTTPS") in ('yes', 'on', '1') else 'http'
# Hop-by-hop headers (RFC 2616 section 13.5.1). A WSGI application must not
# send any of these; the server owns connection management.
_hop_headers = {
    'connection': 1, 'keep-alive': 1, 'proxy-authenticate': 1,
    'proxy-authorization': 1, 'te': 1, 'trailers': 1,
    'transfer-encoding': 1, 'upgrade': 1,
}

def is_hop_by_hop(header_name):
    """Return true if 'header_name' is an HTTP/1.1 "Hop-by-Hop" header"""
    lowered = header_name.lower()
    return lowered in _hop_headers
class ServerHandler(object):
    """Manage the invocation of a WSGI application"""

    # Configuration parameters; can override per-subclass or per-instance
    wsgi_version = (1,0)
    wsgi_multithread = True
    wsgi_multiprocess = True
    wsgi_run_once = False

    origin_server = True    # We are transmitting direct to client
    http_version = "1.0"    # Version that should be used for response
    server_software = software_version

    # os_environ is used to supply configuration from the OS environment:
    # by default it's a copy of 'os.environ' as of import time, but you can
    # override this in e.g. your __init__ method.
    os_environ = dict(os.environ.items())

    # Collaborator classes
    wsgi_file_wrapper = FileWrapper     # set to None to disable
    headers_class = Headers             # must be a Headers-like class

    # Error handling (also per-subclass or per-instance)
    traceback_limit = None  # Print entire traceback to self.get_stderr()
    error_status = "500 INTERNAL SERVER ERROR"
    error_headers = [('Content-Type','text/plain')]

    # State variables (don't mess with these)
    status = result = None
    headers_sent = False
    headers = None
    bytes_sent = 0

    def __init__(self, stdin, stdout, stderr, environ, multithread=True,
                 multiprocess=False):
        # NOTE(review): callers are expected to attach a `request_handler`
        # backpointer after construction (see WSGIRequestHandler.handle);
        # close() depends on it for request logging.
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.base_env = environ
        self.wsgi_multithread = multithread
        self.wsgi_multiprocess = multiprocess

    def run(self, application):
        """Invoke the application"""
        # Note to self: don't move the close()! Asynchronous servers shouldn't
        # call close() from finish_response(), so if you close() anywhere but
        # the double-error branch here, you'll break asynchronous servers by
        # prematurely closing. Async servers must return from 'run()' without
        # closing if there might still be output to iterate over.
        try:
            self.setup_environ()
            self.result = application(self.environ, self.start_response)
            self.finish_response()
        except:
            try:
                self.handle_error()
            except:
                # If we get an error handling an error, just give up already!
                self.close()
                raise # ...and let the actual server figure it out.

    def setup_environ(self):
        """Set up the environment for one request"""
        env = self.environ = self.os_environ.copy()
        self.add_cgi_vars()

        env['wsgi.input'] = self.get_stdin()
        env['wsgi.errors'] = self.get_stderr()
        env['wsgi.version'] = self.wsgi_version
        env['wsgi.run_once'] = self.wsgi_run_once
        env['wsgi.url_scheme'] = self.get_scheme()
        env['wsgi.multithread'] = self.wsgi_multithread
        env['wsgi.multiprocess'] = self.wsgi_multiprocess

        if self.wsgi_file_wrapper is not None:
            env['wsgi.file_wrapper'] = self.wsgi_file_wrapper

        if self.origin_server and self.server_software:
            env.setdefault('SERVER_SOFTWARE',self.server_software)

    def finish_response(self):
        """
        Send any iterable data, then close self and the iterable

        Subclasses intended for use in asynchronous servers will want to
        redefine this method, such that it sets up callbacks in the event loop
        to iterate over the data, and to call 'self.close()' once the response
        is finished.
        """
        # sendfile() is only attempted for wsgi.file_wrapper results; when it
        # declines (returns false) we fall back to ordinary iteration.
        if not self.result_is_file() or not self.sendfile():
            for data in self.result:
                self.write(data)
            self.finish_content()
        self.close()

    def get_scheme(self):
        """Return the URL scheme being used"""
        return guess_scheme(self.environ)

    def set_content_length(self):
        """Compute Content-Length or switch to chunked encoding if possible"""
        try:
            blocks = len(self.result)
        except (TypeError, AttributeError, NotImplementedError):
            pass
        else:
            if blocks==1:
                # A one-block response has been fully buffered by write(),
                # so bytes_sent is the exact body length.
                self.headers['Content-Length'] = str(self.bytes_sent)
                return
        # XXX Try for chunked encoding if origin server and client is 1.1

    def cleanup_headers(self):
        """Make any necessary header changes or defaults

        Subclasses can extend this to add other defaults.
        """
        if 'Content-Length' not in self.headers:
            self.set_content_length()

    def start_response(self, status, headers,exc_info=None):
        """'start_response()' callable as specified by PEP 333"""

        if exc_info:
            try:
                if self.headers_sent:
                    # Re-raise original exception if headers sent
                    raise exc_info[0], exc_info[1], exc_info[2]
            finally:
                exc_info = None        # avoid dangling circular ref
        elif self.headers is not None:
            raise AssertionError("Headers already set!")

        assert isinstance(status, str),"Status must be a string"
        assert len(status)>=4,"Status must be at least 4 characters"
        assert int(status[:3]),"Status message must begin w/3-digit code"
        assert status[3]==" ", "Status message must have a space after code"
        if __debug__:
            for name,val in headers:
                assert isinstance(name, str),"Header names must be strings"
                assert isinstance(val, str),"Header values must be strings"
                assert not is_hop_by_hop(name),"Hop-by-hop headers not allowed"
        self.status = status
        self.headers = self.headers_class(headers)
        return self.write

    def send_preamble(self):
        """Transmit version/status/date/server, via self._write()"""
        if self.origin_server:
            if self.client_is_modern():
                self._write('HTTP/%s %s\r\n' % (self.http_version,self.status))
                if 'Date' not in self.headers:
                    self._write(
                        'Date: %s\r\n' % http_date()
                    )
                if self.server_software and 'Server' not in self.headers:
                    self._write('Server: %s\r\n' % self.server_software)
        else:
            # Running behind a gateway: emit a CGI-style Status line instead.
            self._write('Status: %s\r\n' % self.status)

    def write(self, data):
        """'write()' callable as specified by PEP 333"""

        assert isinstance(data, str), "write() argument must be string"

        if not self.status:
            raise AssertionError("write() before start_response()")

        elif not self.headers_sent:
            # Before the first output, send the stored headers
            self.bytes_sent = len(data)  # make sure we know content-length
            self.send_headers()
        else:
            self.bytes_sent += len(data)

        # XXX check Content-Length and truncate if too many bytes written?

        # If data is too large, socket will choke, so write chunks no larger
        # than 32MB at a time.
        length = len(data)
        if length > 33554432:
            offset = 0
            while offset < length:
                chunk_size = min(33554432, length)
                # Slicing clamps at the end of the string, so the final
                # (possibly short) chunk is still written correctly.
                self._write(data[offset:offset+chunk_size])
                self._flush()
                offset += chunk_size
        else:
            self._write(data)
            self._flush()

    def sendfile(self):
        """Platform-specific file transmission

        Override this method in subclasses to support platform-specific
        file transmission.  It is only called if the application's
        return iterable ('self.result') is an instance of
        'self.wsgi_file_wrapper'.

        This method should return a true value if it was able to actually
        transmit the wrapped file-like object using a platform-specific
        approach.  It should return a false value if normal iteration
        should be used instead.  An exception can be raised to indicate
        that transmission was attempted, but failed.

        NOTE: this method should call 'self.send_headers()' if
        'self.headers_sent' is false and it is going to attempt direct
        transmission of the file.
        """
        return False   # No platform-specific transmission by default

    def finish_content(self):
        """Ensure headers and content have both been sent"""
        if not self.headers_sent:
            self.headers['Content-Length'] = "0"
            self.send_headers()
        else:
            pass # XXX check if content-length was too short?

    def close(self):
        try:
            self.request_handler.log_request(self.status.split(' ',1)[0], self.bytes_sent)
        finally:
            try:
                if hasattr(self.result,'close'):
                    self.result.close()
            finally:
                # Reset all per-request state so nothing leaks between
                # requests.
                self.result = self.headers = self.status = self.environ = None
                self.bytes_sent = 0; self.headers_sent = False

    def send_headers(self):
        """Transmit headers to the client, via self._write()"""
        self.cleanup_headers()
        self.headers_sent = True
        if not self.origin_server or self.client_is_modern():
            self.send_preamble()
        self._write(str(self.headers))

    def result_is_file(self):
        """True if 'self.result' is an instance of 'self.wsgi_file_wrapper'"""
        wrapper = self.wsgi_file_wrapper
        return wrapper is not None and isinstance(self.result,wrapper)

    def client_is_modern(self):
        """True if client can accept status and headers"""
        return self.environ['SERVER_PROTOCOL'].upper() != 'HTTP/0.9'

    def log_exception(self,exc_info):
        """Log the 'exc_info' tuple in the server log

        Subclasses may override to retarget the output or change its format.
        """
        try:
            from traceback import print_exception
            stderr = self.get_stderr()
            print_exception(
                exc_info[0], exc_info[1], exc_info[2],
                self.traceback_limit, stderr
            )
            stderr.flush()
        finally:
            # Drop the reference so the traceback frames can be collected.
            exc_info = None

    def handle_error(self):
        """Log current error, and send error output to client if possible"""
        self.log_exception(sys.exc_info())
        if not self.headers_sent:
            self.result = self.error_output(self.environ, self.start_response)
            self.finish_response()
        # XXX else: attempt advanced recovery techniques for HTML or text?

    def error_output(self, environ, start_response):
        # Default plain-text 500 response body: the formatted traceback.
        import traceback
        start_response(self.error_status, self.error_headers[:], sys.exc_info())
        return ['\n'.join(traceback.format_exception(*sys.exc_info()))]

    # Pure abstract methods; *must* be overridden in subclasses
    # (Concrete stream-backed defaults are provided below; on first use,
    # _write/_flush rebind themselves to the underlying stream methods to
    # skip a level of indirection on subsequent calls.)

    def _write(self,data):
        self.stdout.write(data)
        self._write = self.stdout.write

    def _flush(self):
        self.stdout.flush()
        self._flush = self.stdout.flush

    def get_stdin(self):
        return self.stdin

    def get_stderr(self):
        return self.stderr

    def add_cgi_vars(self):
        # Merge the per-request CGI variables supplied at construction time.
        self.environ.update(self.base_env)
class WSGIServer(HTTPServer):
    """BaseHTTPServer that implements the Python WSGI protocol"""

    application = None

    def server_bind(self):
        """Override server_bind to store the server name."""
        try:
            HTTPServer.server_bind(self)
        except Exception:
            # Wrap bind failures (port in use, bad address, ...) in our own
            # exception type so the caller can present them cleanly.
            # Fix: `except Exception, e` is Python 2-only syntax; retrieving
            # the instance via sys.exc_info() is portable to every version.
            raise WSGIServerException(sys.exc_info()[1])
        self.setup_environ()

    def setup_environ(self):
        # Set up base environment shared by every request; per-request CGI
        # variables are layered on top in WSGIRequestHandler.get_environ().
        env = self.base_environ = {}
        env['SERVER_NAME'] = self.server_name
        env['GATEWAY_INTERFACE'] = 'CGI/1.1'
        env['SERVER_PORT'] = str(self.server_port)
        env['REMOTE_HOST'] = ''
        env['CONTENT_LENGTH'] = ''
        env['SCRIPT_NAME'] = ''

    def get_app(self):
        return self.application

    def set_app(self, application):
        self.application = application
class WSGIRequestHandler(BaseHTTPRequestHandler):
    """Per-connection handler that builds a WSGI environ and runs the app
    via ServerHandler."""

    server_version = "WSGIServer/" + __version__

    def __init__(self, *args, **kwargs):
        from django.conf import settings
        self.admin_media_prefix = settings.ADMIN_MEDIA_PREFIX
        # We set self.path to avoid crashes in log_message() on unsupported
        # requests (like "OPTIONS").
        self.path = ''
        self.style = color_style()
        BaseHTTPRequestHandler.__init__(self, *args, **kwargs)

    def get_environ(self):
        # Build the per-request CGI/WSGI environ on top of the server's base
        # environment (see WSGIServer.setup_environ).
        env = self.server.base_environ.copy()
        env['SERVER_PROTOCOL'] = self.request_version
        env['REQUEST_METHOD'] = self.command
        if '?' in self.path:
            path,query = self.path.split('?',1)
        else:
            path,query = self.path,''
        env['PATH_INFO'] = urllib.unquote(path)
        env['QUERY_STRING'] = query
        env['REMOTE_ADDR'] = self.client_address[0]
        if self.headers.typeheader is None:
            env['CONTENT_TYPE'] = self.headers.type
        else:
            env['CONTENT_TYPE'] = self.headers.typeheader
        length = self.headers.getheader('content-length')
        if length:
            env['CONTENT_LENGTH'] = length
        # Remaining request headers become HTTP_* variables.
        for h in self.headers.headers:
            k,v = h.split(':',1)
            k=k.replace('-','_').upper(); v=v.strip()
            if k in env:
                continue # skip content length, type,etc.
            if 'HTTP_'+k in env:
                env['HTTP_'+k] += ','+v # comma-separate multiple headers
            else:
                env['HTTP_'+k] = v
        return env

    def get_stderr(self):
        return sys.stderr

    def handle(self):
        """Handle a single HTTP request"""
        self.raw_requestline = self.rfile.readline()
        if not self.parse_request(): # An error code has been sent, just exit
            return
        handler = ServerHandler(self.rfile, self.wfile, self.get_stderr(), self.get_environ())
        handler.request_handler = self # backpointer for logging
        handler.run(self.server.get_app())

    def log_message(self, format, *args):
        # Don't bother logging requests for admin images or the favicon.
        if self.path.startswith(self.admin_media_prefix) or self.path == '/favicon.ico':
            return

        msg = "[%s] %s\n" % (self.log_date_time_string(), format % args)

        # Utilize terminal colors, if available
        # NOTE(review): the color choice assumes args[1] is the HTTP status
        # code string, which holds for the default log_request() format —
        # confirm before calling with a custom format.
        if args[1][0] == '2':
            # Put 2XX first, since it should be the common case
            msg = self.style.HTTP_SUCCESS(msg)
        elif args[1][0] == '1':
            msg = self.style.HTTP_INFO(msg)
        elif args[1] == '304':
            msg = self.style.HTTP_NOT_MODIFIED(msg)
        elif args[1][0] == '3':
            msg = self.style.HTTP_REDIRECT(msg)
        elif args[1] == '404':
            msg = self.style.HTTP_NOT_FOUND(msg)
        elif args[1][0] == '4':
            msg = self.style.HTTP_BAD_REQUEST(msg)
        else:
            # Any 5XX, or any other response
            msg = self.style.HTTP_SERVER_ERROR(msg)
        sys.stderr.write(msg)
class AdminMediaHandler(object):
    """
    WSGI middleware that intercepts calls to the admin media directory, as
    defined by the ADMIN_MEDIA_PREFIX setting, and serves those images.
    Use this ONLY LOCALLY, for development! This hasn't been tested for
    security and is not super efficient.
    """
    def __init__(self, application, media_dir=None):
        from django.conf import settings
        self.application = application
        if not media_dir:
            import django
            # Default to the admin media bundled with the installed Django.
            self.media_dir = \
                os.path.join(django.__path__[0], 'contrib', 'admin', 'media')
        else:
            self.media_dir = media_dir
        self.media_url = settings.ADMIN_MEDIA_PREFIX

    def file_path(self, url):
        """
        Returns the path to the media file on disk for the given URL.

        The passed URL is assumed to begin with ADMIN_MEDIA_PREFIX. If the
        resultant file path is outside the media directory, then a ValueError
        is raised.
        """
        # Remove ADMIN_MEDIA_PREFIX.
        relative_url = url[len(self.media_url):]
        relative_path = urllib.url2pathname(relative_url)
        # safe_join raises ValueError for paths escaping media_dir, which
        # __call__ turns into a 404 (directory-traversal guard).
        return safe_join(self.media_dir, relative_path)

    def __call__(self, environ, start_response):
        import os.path

        # Ignore requests that aren't under ADMIN_MEDIA_PREFIX. Also ignore
        # all requests if ADMIN_MEDIA_PREFIX isn't a relative URL.
        if self.media_url.startswith('http://') or self.media_url.startswith('https://') \
            or not environ['PATH_INFO'].startswith(self.media_url):
            return self.application(environ, start_response)

        # Find the admin file and serve it up, if it exists and is readable.
        try:
            file_path = self.file_path(environ['PATH_INFO'])
        except ValueError: # Resulting file path was not valid.
            status = '404 NOT FOUND'
            headers = {'Content-type': 'text/plain'}
            output = ['Page not found: %s' % environ['PATH_INFO']]
            start_response(status, headers.items())
            return output
        if not os.path.exists(file_path):
            status = '404 NOT FOUND'
            headers = {'Content-type': 'text/plain'}
            output = ['Page not found: %s' % environ['PATH_INFO']]
        else:
            try:
                fp = open(file_path, 'rb')
            except IOError:
                status = '401 UNAUTHORIZED'
                headers = {'Content-type': 'text/plain'}
                output = ['Permission denied: %s' % environ['PATH_INFO']]
            else:
                # This is a very simple implementation of conditional GET with
                # the Last-Modified header. It makes media files a bit speedier
                # because the files are only read off disk for the first
                # request (assuming the browser/client supports conditional
                # GET).
                mtime = http_date(os.stat(file_path)[stat.ST_MTIME])
                headers = {'Last-Modified': mtime}
                if environ.get('HTTP_IF_MODIFIED_SINCE', None) == mtime:
                    status = '304 NOT MODIFIED'
                    output = []
                else:
                    status = '200 OK'
                    mime_type = mimetypes.guess_type(file_path)[0]
                    if mime_type:
                        headers['Content-Type'] = mime_type
                    output = [fp.read()]
                fp.close()
        start_response(status, headers.items())
        return output
class WSGIServerV6(WSGIServer):
    """WSGIServer variant that listens on an IPv6 socket."""
    address_family = socket.AF_INET6
def run(addr, port, wsgi_handler, enable_ipv6=False):
    """Serve `wsgi_handler` forever on (addr, port), over IPv6 if requested."""
    if enable_ipv6:
        server_class = WSGIServerV6
    else:
        server_class = WSGIServer
    httpd = server_class((addr, port), WSGIRequestHandler)
    httpd.set_app(wsgi_handler)
    httpd.serve_forever()
| |
import os
import shlex
import urlparse
import voluptuous as V
from memsql_loader.util.attr_dict import AttrDict
from memsql_loader.util import log
from memsql_loader.vendor import glob2
class InvalidKeyException(Exception):
    """Error type for invalid keys (raised elsewhere in the loader)."""
class LoadPath(object):
    """Parsed representation of a single 'source.paths' spec entry.

    Supports s3://bucket/pattern, hdfs://host/pattern and plain or file://
    paths; the non-scheme portion is kept as a glob pattern.
    """
    def __init__(self, path):
        self.path = path
        parsed = urlparse.urlparse(path)
        self.bucket = None
        if parsed.scheme == 's3':
            self.scheme = 's3'
            self.bucket = parsed.netloc
            # this strips the starting /
            self.pattern = parsed.path[1:]
        elif parsed.scheme == 'hdfs':
            self.scheme = 'hdfs'
            # urlparse puts the host into netloc; fold it back into the
            # pattern and drop any leading slashes.
            self.pattern = parsed.netloc + parsed.path
            self.pattern = self.pattern.lstrip('/')
        elif parsed.scheme == 'file' or not parsed.scheme:
            self.scheme = 'file'
            # cannot use os.path.join because of the starting /
            self.pattern = parsed.netloc + parsed.path
        else:
            raise V.Invalid("Unknown file scheme %s" % parsed.scheme, path=[ 'source', 'paths' ])

        if self.scheme == 'file' and '|' in self.pattern:
            raise V.Invalid("OR (|) operators are not supported in file patterns", path=[ 'source', 'paths' ])
        if self.bucket is not None and glob2.has_magic(self.bucket):
            raise V.Invalid("Buckets (%s) cannot have pattern characters ('*', '[', ']')" % (self.bucket), path=[ 'source', 'paths' ])
        if self.bucket and not self.pattern:
            raise V.Invalid("Path '%s' specifies a bucket ('%s') but cannot match any keys" % (path, self.bucket), path=[ 'source', 'paths' ])

    def __str__(self):
        # NOTE(review): encode() here assumes Python 2 semantics, where the
        # result is a byte string that concatenates with str — confirm before
        # porting to Python 3.
        bucket_s = self.bucket.encode('utf-8') if self.bucket else ''
        pattern_s = self.pattern.encode('utf-8')
        return self.scheme + "://" + os.path.join(bucket_s, pattern_s)
# Defaults injected into the spec schema below for source.aws_access_key /
# source.aws_secret_key; None means the spec supplied no credentials.
DEFAULT_AWS_ACCESS_KEY = None
DEFAULT_AWS_SECRET_KEY = None
def get_spec_validator():
    """Build and return the voluptuous Schema for a load spec.

    Every optional field carries a default, so whole sections can be
    materialized from an empty dict; source.paths and the target
    database/table are the truly mandatory fields.
    """
    # Field-level delimiters/escaping.
    _options_fields_schema = V.Schema({
        V.Required("terminated", default='\t'): basestring,
        V.Required("enclosed", default=""): basestring,
        V.Required("escaped", default="\\"): basestring
    })
    # Line-level options: lines to skip, line prefix and terminator.
    _options_lines_schema = V.Schema({
        V.Required("ignore", default=0): int,
        V.Required("starting", default=""): basestring,
        V.Required("terminated", default='\n'): basestring
    })
    # Defaults like _options_fields_schema({}) validate an empty dict so
    # that nested defaults get filled in as well.
    _options_schema = V.Schema({
        V.Required("fields", default=_options_fields_schema({})): _options_fields_schema,
        V.Required("lines", default=_options_lines_schema({})): _options_lines_schema,
        V.Required("columns", default=[]): [basestring],
        V.Required("file_id_column", default=None): V.Any(basestring, None),
        V.Required("non_local_load", default=False): bool,
        V.Required("duplicate_key_method", default="error"): V.Any("error", "replace", "ignore"),
        V.Required("script", default=None): V.Any(basestring, None)
    })
    # Database connection parameters.
    _db_schema = V.Schema({
        V.Required('host', default='127.0.0.1'): basestring,
        V.Required('port', default=3306): int,
        V.Required('user', default='root'): basestring,
        V.Required('password', default=''): basestring,
    })
    # Each path in paths looks something like:
    # [s3://|file://|hdfs://][bucket/]file/pattern
    SPEC_VALIDATOR = V.Schema({
        V.Required("source"): V.Schema({
            V.Required("aws_access_key", default=DEFAULT_AWS_ACCESS_KEY): V.Any(basestring, None),
            V.Required("aws_secret_key", default=DEFAULT_AWS_SECRET_KEY): V.Any(basestring, None),
            V.Required("hdfs_host", default=None): V.Any(basestring, None),
            V.Required("webhdfs_port", default=50070): V.Any(int, None),
            V.Required("hdfs_user", default=None): V.Any(basestring, None),
            V.Required("paths"): [basestring],
        }),
        V.Required("connection", default=_db_schema({})): _db_schema,
        V.Required("target"): V.Schema({
            V.Required("database"): basestring,
            V.Required("table"): basestring
        }, required=True),
        V.Required("options", default=_options_schema({})): _options_schema
    })
    return SPEC_VALIDATOR
def get_command_line_options(key_list):
    """Return every command line option name that could correspond to the
    schema path *key_list*.

    Candidates are built from every suffix of the path joined with '_'
    (forward order), plus every suffix-from-the-end joined in reversed
    order, so e.g. ['options', 'lines', 'ignore'] yields both
    'lines_ignore' and 'ignore_lines' among others.  Order of the
    returned list is unspecified (it comes from a set).
    """
    candidates = set()
    size = len(key_list)
    for start in range(size):
        candidates.add('_'.join(key_list[start:]))
    for count in range(size):
        # key_list[-0:] is the whole list, matching the full reversed path
        candidates.add('_'.join(reversed(key_list[-count:])))
    return list(candidates)
# Nested mapping from schema key paths to the command line option that
# overrides them; populated while the spec schema is walked.
COMMAND_LINE_MAPPING = {}
def set_command_line_mapping(all_keys, option_name):
    """Record that the schema path *all_keys* maps to *option_name*.

    Intermediate dictionaries are created on demand so the mapping
    mirrors the nesting of the spec schema.
    """
    global COMMAND_LINE_MAPPING
    node = COMMAND_LINE_MAPPING
    for part in all_keys[:-1]:
        node = node.setdefault(str(part), {})
    node[str(all_keys[-1])] = option_name
def get_command_line_mapping(all_keys):
    """Return the option name recorded for the schema path *all_keys*.

    Raises KeyError when the path was never registered through
    set_command_line_mapping().
    """
    global COMMAND_LINE_MAPPING
    node = COMMAND_LINE_MAPPING
    for part in all_keys[:-1]:
        node = node[str(part)]
    return node[str(all_keys[-1])]
def build_spec_recursive(logger, options, base_spec, validator, parent_keys):
    """Recursively merge *base_spec* with command line *options*, walking
    the structure of *validator* (a voluptuous Schema).

    For every leaf key, exactly one matching command line option must
    exist; a non-None option value overrides the base_spec value.
    Raises InvalidKeyException for base_spec keys not in the schema.
    """
    ret = {}
    valid_keys = set([ str(k) for k in validator.schema.keys() ])
    for key in base_spec.keys():
        if key not in valid_keys:
            raise InvalidKeyException("%s is not a valid key" % key)
    for key, val in validator.schema.items():
        key_s = str(key)
        full_key_path = parent_keys + [key]
        schema_path = ".".join(map(str, full_key_path))
        if isinstance(val, V.Schema):
            # Recurse on a subspec
            base_val = base_spec[key_s] if key_s in base_spec else {}
            newval = build_spec_recursive(logger, options, base_val, val, full_key_path)
        else:
            # Match it to a command line option (and assert that exactly one exists)
            cl_options = get_command_line_options(map(str, full_key_path))
            found = False
            for opt in cl_options:
                if hasattr(options, opt):
                    assert not found, "Multiple keys for path %s have options (%s)" % (schema_path, cl_options)
                    newval = getattr(options, opt)
                    set_command_line_mapping(full_key_path, opt)
                    found = True
            assert found, "No command line option for %s (%s)" % (schema_path, cl_options)
        if newval is not None:
            # this means that the user passed in an option that overrides the spec
            ret[key_s] = newval
        elif key_s in base_spec:
            ret[key_s] = base_spec[key_s]
    return ret
def build_spec(base_spec, options):
    """Merge *base_spec* with command line *options* into a spec dict.

    For each part of base_spec we expect either the key name itself or
    the full-schema-path-to-keyname to be exposed in *options*; the
    recursive walk resolves each leaf against exactly one option.
    """
    logger = log.get_logger('Schema')
    validator = get_spec_validator()
    return build_spec_recursive(logger, options, base_spec, validator, [])
def validate_spec(spec):
    """Validate *spec* against the schema and run post-validation checks.

    Returns the validated spec wrapped in an AttrDict.  Raises
    voluptuous.Invalid when a post-validation constraint is violated.
    """
    spec = AttrDict.from_dict(get_spec_validator()(spec))
    # post validation steps go here
    assert 'file_id_column' in spec.options
    if spec.options.file_id_column is not None:
        file_id_column = spec['options']['file_id_column']
        # BUGFIX: the schema always inserts 'columns' (default []), so a
        # missing-key test could never fire and an empty column list
        # slipped through.  Test for emptiness instead.
        if not spec['options']['columns']:
            raise V.Invalid('options.columns must be specified if file_id_column is provided', path=[ 'options', 'columns' ])
        elif file_id_column in spec['options']['columns']:
            raise V.Invalid('options.columns can not contain the file_id_column, it will be filled in by MemSQL-Loader',
                            path=[ 'options', 'columns' ])
    if spec.options.script is not None:
        # Ensure the script can at least be tokenized as a shell command.
        try:
            shlex.split(spec.options.script)
        except ValueError as e:
            raise V.Invalid('options.script is invalid: %s' % str(e), path=[ 'options', 'script' ])
    return spec
| |
"""
Graphical model (GM)-based optimization algorithm using Theano
"""
__authors__ = "James Bergstra"
__license__ = "3-clause BSD License"
__contact__ = "github.com/jaberg/hyperopt"
import logging
import time
import numpy as np
from scipy.special import erf
import pyll
from pyll import scope
from pyll.stochastic import implicit_stochastic
from .base import miscs_to_idxs_vals
from .base import miscs_update_idxs_vals
from .base import Trials
import rand
# Module-level logger for the TPE suggestion algorithm.
logger = logging.getLogger(__name__)
# Small constant used to keep denominators and cdf arguments away from zero.
EPS = 1e-12
# -- default linear forgetting. don't try to change by writing this variable
# because it's captured in function default args when this file is read
DEFAULT_LF = 25
# Registry mapping a prior distribution name (e.g. 'uniform') to the
# function that builds its adaptive-Parzen posterior sampler.
adaptive_parzen_samplers = {}
def adaptive_parzen_sampler(name):
    """Decorator registering a function as the sampler for *name*.

    Each name may be registered exactly once; the decorated function is
    returned unchanged.
    """
    def register(f):
        assert name not in adaptive_parzen_samplers
        adaptive_parzen_samplers[name] = f
        return f
    return register
#
# These are some custom distributions
# that are used to represent posterior distributions.
#
# -- Categorical
@scope.define
def categorical_lpdf(sample, p, upper):
    """Elementwise log-probability of integer *sample* values under the
    categorical distribution with probability vector *p*.

    Returns an empty array for an empty sample (``upper`` is unused).
    """
    if not sample.size:
        return np.asarray([])
    return np.log(np.asarray(p)[sample])
# -- Bounded Gaussian Mixture Model (BGMM)
@implicit_stochastic
@scope.define
def GMM1(weights, mus, sigmas, low=None, high=None, q=None, rng=None,
         size=()):
    """Sample from truncated 1-D Gaussian Mixture Model.

    weights/mus/sigmas are per-component parameters of equal length;
    weights are used directly as multinomial probabilities when picking
    a component.  Optional [low, high) truncation uses rejection
    sampling; optional q quantizes samples to multiples of q.
    """
    weights, mus, sigmas = map(np.asarray, (weights, mus, sigmas))
    assert len(weights) == len(mus) == len(sigmas)
    n_samples = np.prod(size)
    #n_components = len(weights)
    if low is None and high is None:
        # -- draw from a standard GMM
        active = np.argmax(rng.multinomial(1, weights, (n_samples,)), axis=1)
        samples = rng.normal(loc=mus[active], scale=sigmas[active])
    else:
        # -- draw from truncated components
        # TODO: one-sided-truncation
        low = float(low)
        high = float(high)
        if low >= high:
            raise ValueError('low >= high', (low, high))
        samples = []
        # rejection sampling: keep drawing until enough land in [low, high)
        while len(samples) < n_samples:
            active = np.argmax(rng.multinomial(1, weights))
            draw = rng.normal(loc=mus[active], scale=sigmas[active])
            if low <= draw < high:
                samples.append(draw)
        samples = np.reshape(np.asarray(samples), size)
    #print 'SAMPLES', samples
    if q is None:
        return samples
    else:
        # quantize to the nearest multiple of q
        return np.round(samples / q) * q
@scope.define
def normal_cdf(x, mu, sigma):
    """CDF of a normal distribution at *x*; the denominator is floored
    at EPS so a zero sigma cannot divide by zero."""
    denom = np.maximum(np.sqrt(2) * sigma, EPS)
    z = (x - mu) / denom
    return 0.5 * (1 + erf(z))
@scope.define
def GMM1_lpdf(samples, weights, mus, sigmas, low=None, high=None, q=None):
    """Log-density of *samples* under a (possibly truncated and/or
    quantized) 1-D Gaussian mixture.

    Returns an array with the same shape as *samples*; empty input
    yields an empty array.
    """
    verbose = 0
    samples, weights, mus, sigmas = map(np.asarray,
                                        (samples, weights, mus, sigmas))
    if samples.size == 0:
        return np.asarray([])
    if weights.ndim != 1:
        raise TypeError('need vector of weights', weights.shape)
    if mus.ndim != 1:
        raise TypeError('need vector of mus', mus.shape)
    if sigmas.ndim != 1:
        raise TypeError('need vector of sigmas', sigmas.shape)
    assert len(weights) == len(mus) == len(sigmas)
    _samples = samples
    samples = _samples.flatten()
    if verbose:
        print 'GMM1_lpdf:samples', set(samples)
        print 'GMM1_lpdf:weights', weights
        print 'GMM1_lpdf:mus', mus
        print 'GMM1_lpdf:sigmas', sigmas
        print 'GMM1_lpdf:low', low
        print 'GMM1_lpdf:high', high
        print 'GMM1_lpdf:q', q
    if low is None and high is None:
        p_accept = 1
    else:
        # mass of the mixture inside [low, high]; renormalizes the
        # truncated density
        p_accept = np.sum(
            weights * (
                normal_cdf(high, mus, sigmas)
                - normal_cdf(low, mus, sigmas)))
    if q is None:
        dist = samples[:, None] - mus
        mahal = (dist / np.maximum(sigmas, EPS)) ** 2
        # mahal shape is (n_samples, n_components)
        Z = np.sqrt(2 * np.pi * sigmas ** 2)
        coef = weights / Z / p_accept
        rval = logsum_rows(- 0.5 * mahal + np.log(coef))
    else:
        # quantized case: integrate each component's density over the
        # bucket of width q around each sample (clipped to the bounds)
        prob = np.zeros(samples.shape, dtype='float64')
        for w, mu, sigma in zip(weights, mus, sigmas):
            if high is None:
                ubound = samples + q / 2.0
            else:
                ubound = np.minimum(samples + q / 2.0, high)
            if low is None:
                lbound = samples - q / 2.0
            else:
                lbound = np.maximum(samples - q / 2.0, low)
            # -- two-stage addition is slightly more numerically accurate
            inc_amt = w * normal_cdf(ubound, mu, sigma)
            inc_amt -= w * normal_cdf(lbound, mu, sigma)
            prob += inc_amt
        rval = np.log(prob) - np.log(p_accept)
    if verbose:
        print 'GMM1_lpdf:rval:', dict(zip(samples, rval))
    rval.shape = _samples.shape
    return rval
# -- Mixture of Log-Normals
@scope.define
def lognormal_cdf(x, mu, sigma):
    # wikipedia claims cdf is
    # .5 + .5 * erf((log(x) - mu) / sqrt(2 * sigma^2))
    #
    # the maximum is used to move negative values and 0 up to a point
    # where they do not cause nan or inf, but also don't contribute much
    # to the cdf.
    if len(x) == 0:
        return np.asarray([])
    if x.min() < 0:
        raise ValueError('negative arg to lognormal_cdf', x)
    # temporarily silence divide-by-zero warnings from log/division
    olderr = np.seterr(divide='ignore')
    try:
        top = np.log(np.maximum(x, EPS)) - mu
        bottom = np.maximum(np.sqrt(2) * sigma, EPS)
        z = top / bottom
        return .5 + .5 * erf(z)
    finally:
        np.seterr(**olderr)
@scope.define
def lognormal_lpdf(x, mu, sigma):
    """Log-density of the lognormal distribution at *x*.

    Formula from http://en.wikipedia.org/wiki/Log-normal_distribution;
    sigma is floored at EPS to avoid division by zero.
    """
    assert np.all(sigma >= 0)
    sigma = np.maximum(sigma, EPS)
    norm_const = sigma * x * np.sqrt(2 * np.pi)
    exponent = 0.5 * ((np.log(x) - mu) / sigma) ** 2
    return -exponent - np.log(norm_const)
@scope.define
def qlognormal_lpdf(x, mu, sigma, q):
    """Log-probability of a quantized lognormal value: the log of the
    CDF mass between x - q and x.

    XXX: subtracting two numbers potentially very close together.
    """
    upper_mass = lognormal_cdf(x, mu, sigma)
    lower_mass = lognormal_cdf(x - q, mu, sigma)
    return np.log(upper_mass - lower_mass)
@implicit_stochastic
@scope.define
def LGMM1(weights, mus, sigmas, low=None, high=None, q=None,
          rng=None, size=()):
    """Sample from a 1-D lognormal mixture: exp() of normal draws from
    the selected component.  Optional truncation bounds are applied in
    log space (before exp); optional q quantizes the final samples.
    """
    weights, mus, sigmas = map(np.asarray, (weights, mus, sigmas))
    n_samples = np.prod(size)
    #n_components = len(weights)
    if low is None and high is None:
        active = np.argmax(
            rng.multinomial(1, weights, (n_samples,)),
            axis=1)
        assert len(active) == n_samples
        samples = np.exp(
            rng.normal(
                loc=mus[active],
                scale=sigmas[active]))
    else:
        # -- draw from truncated components
        # TODO: one-sided-truncation
        low = float(low)
        high = float(high)
        if low >= high:
            raise ValueError('low >= high', (low, high))
        samples = []
        # rejection-sample in log space; exponentiate accepted draws
        while len(samples) < n_samples:
            active = np.argmax(rng.multinomial(1, weights))
            draw = rng.normal(loc=mus[active], scale=sigmas[active])
            if low <= draw < high:
                samples.append(np.exp(draw))
        samples = np.asarray(samples)
    samples = np.reshape(np.asarray(samples), size)
    if q is not None:
        samples = np.round(samples / q) * q
    return samples
def logsum_rows(x):
    """Row-wise log-sum-exp of a 2-D array, shifted by each row's max
    for numerical stability."""
    n_rows, n_cols = x.shape  # also asserts x is 2-D
    shift = x.max(axis=1)
    return np.log(np.exp(x - shift[:, None]).sum(axis=1)) + shift
@scope.define
def LGMM1_lpdf(samples, weights, mus, sigmas, low=None, high=None, q=None):
    """Log-density of *samples* under a (possibly truncated and/or
    quantized) 1-D lognormal mixture; truncation bounds are in log
    space.  Returns an array with the same shape as *samples*.
    """
    samples, weights, mus, sigmas = map(np.asarray,
                                        (samples, weights, mus, sigmas))
    assert weights.ndim == 1
    assert mus.ndim == 1
    assert sigmas.ndim == 1
    _samples = samples
    if samples.ndim != 1:
        samples = samples.flatten()
    if low is None and high is None:
        p_accept = 1
    else:
        # mass of the mixture within the (log-space) truncation bounds
        p_accept = np.sum(
            weights * (
                normal_cdf(high, mus, sigmas)
                - normal_cdf(low, mus, sigmas)))
    if q is None:
        # compute the lpdf of each sample under each component
        lpdfs = lognormal_lpdf(samples[:, None], mus, sigmas)
        rval = logsum_rows(lpdfs + np.log(weights))
    else:
        # compute the lpdf of each sample under each component
        prob = np.zeros(samples.shape, dtype='float64')
        for w, mu, sigma in zip(weights, mus, sigmas):
            if high is None:
                ubound = samples + q / 2.0
            else:
                # exp(): samples are in value space, the bound in log space
                ubound = np.minimum(samples + q / 2.0, np.exp(high))
            if low is None:
                lbound = samples - q / 2.0
            else:
                lbound = np.maximum(samples - q / 2.0, np.exp(low))
            lbound = np.maximum(0, lbound)
            # -- two-stage addition is slightly more numerically accurate
            inc_amt = w * lognormal_cdf(ubound, mu, sigma)
            inc_amt -= w * lognormal_cdf(lbound, mu, sigma)
            prob += inc_amt
        rval = np.log(prob) - np.log(p_accept)
    rval.shape = _samples.shape
    return rval
#
# This is the weird heuristic ParzenWindow estimator used for continuous
# distributions in various ways.
#
@scope.define_info(o_len=3)
def adaptive_parzen_normal_orig(mus, prior_weight, prior_mu, prior_sigma):
    """
    A heuristic estimator for the mu and sigma values of a GMM
    TODO: try to find this heuristic in the literature, and cite it - Yoshua
    mentioned the term 'elastic' I think?

    mus - matrix (N, M) of M, N-dimensional component centers

    Returns (weights, mus, sigma) with the prior component prepended at
    position 0.
    """
    mus_orig = np.array(mus)
    mus = np.array(mus)
    assert str(mus.dtype) != 'object'
    if mus.ndim != 1:
        raise TypeError('mus must be vector', mus)
    if len(mus) == 0:
        mus = np.asarray([prior_mu])
        sigma = np.asarray([prior_sigma])
    elif len(mus) == 1:
        mus = np.asarray([prior_mu] + [mus[0]])
        sigma = np.asarray([prior_sigma, prior_sigma * .5])
    elif len(mus) >= 2:
        # sigma of each interior (sorted) component = distance to its
        # farther immediate neighbour; edges use wider spans below
        order = np.argsort(mus)
        mus = mus[order]
        sigma = np.zeros_like(mus)
        sigma[1:-1] = np.maximum(
            mus[1:-1] - mus[0:-2],
            mus[2:] - mus[1:-1])
        if len(mus) > 2:
            lsigma = mus[2] - mus[0]
            usigma = mus[-1] - mus[-3]
        else:
            lsigma = mus[1] - mus[0]
            usigma = mus[-1] - mus[-2]
        sigma[0] = lsigma
        sigma[-1] = usigma
        # XXX: is sorting them necessary anymore?
        # un-sort the mus and sigma
        mus[order] = mus.copy()
        sigma[order] = sigma.copy()
        if not np.all(mus_orig == mus):
            print 'orig', mus_orig
            print 'mus', mus
        assert np.all(mus_orig == mus)
    # put the prior back in
    mus = np.asarray([prior_mu] + list(mus))
    sigma = np.asarray([prior_sigma] + list(sigma))
    maxsigma = prior_sigma
    # -- magic formula:
    minsigma = prior_sigma / np.sqrt(1 + len(mus))
    #print 'maxsigma, minsigma', maxsigma, minsigma
    sigma = np.clip(sigma, minsigma, maxsigma)
    weights = np.ones(len(mus), dtype=mus.dtype)
    weights[0] = prior_weight
    #print weights.dtype
    weights = weights / weights.sum()
    if 0:
        print 'WEIGHTS', weights
        print 'MUS', mus
        print 'SIGMA', sigma
    return weights, mus, sigma
@scope.define
def linear_forgetting_weights(N, LF):
    """Weights for N observations ordered oldest-first: the newest LF
    keep full weight 1.0, older ones ramp linearly down toward 1/N."""
    assert N >= 0
    assert LF > 0
    if N == 0:
        return np.asarray([])
    if N < LF:
        return np.ones(N)
    ramp = np.linspace(1.0 / N, 1.0, num=N - LF)
    flat = np.ones(LF)
    weights = np.concatenate([ramp, flat], axis=0)
    assert weights.shape == (N,), (weights.shape, N)
    return weights
# XXX: make TPE do a post-inference pass over the pyll graph and insert
# non-default LF argument
@scope.define_info(o_len=3)
def adaptive_parzen_normal(mus, prior_weight, prior_mu, prior_sigma,
                           LF=DEFAULT_LF):
    """
    mus - matrix (N, M) of M, N-dimensional component centers

    Returns (weights, mus, sigmas) for a Gaussian mixture built from
    the observations plus one prior component, sorted by mu.  When
    LF < len(mus), older observations are linearly down-weighted
    (linear forgetting).
    """
    #mus_orig = np.array(mus)
    mus = np.array(mus)
    assert str(mus.dtype) != 'object'
    if mus.ndim != 1:
        raise TypeError('mus must be vector', mus)
    if len(mus) == 0:
        srtd_mus = np.asarray([prior_mu])
        sigma = np.asarray([prior_sigma])
        prior_pos = 0
    elif len(mus) == 1:
        if prior_mu < mus[0]:
            prior_pos = 0
            srtd_mus = np.asarray([prior_mu, mus[0]])
            sigma = np.asarray([prior_sigma, prior_sigma * .5])
        else:
            prior_pos = 1
            srtd_mus = np.asarray([mus[0], prior_mu])
            sigma = np.asarray([prior_sigma * .5, prior_sigma])
    elif len(mus) >= 2:
        # create new_mus, which is sorted, and in which
        # the prior has been inserted
        order = np.argsort(mus)
        prior_pos = np.searchsorted(mus[order], prior_mu)
        srtd_mus = np.zeros(len(mus) + 1)
        srtd_mus[:prior_pos] = mus[order[:prior_pos]]
        srtd_mus[prior_pos] = prior_mu
        srtd_mus[prior_pos + 1:] = mus[order[prior_pos:]]
        # interior sigmas = distance to the farther immediate neighbour;
        # edge components use their single neighbour
        sigma = np.zeros_like(srtd_mus)
        sigma[1:-1] = np.maximum(
            srtd_mus[1:-1] - srtd_mus[0:-2],
            srtd_mus[2:] - srtd_mus[1:-1])
        lsigma = srtd_mus[1] - srtd_mus[0]
        usigma = srtd_mus[-1] - srtd_mus[-2]
        sigma[0] = lsigma
        sigma[-1] = usigma
    if LF and LF < len(mus):
        # linear forgetting: down-weight all but the newest LF observations
        unsrtd_weights = linear_forgetting_weights(len(mus), LF)
        srtd_weights = np.zeros_like(srtd_mus)
        assert len(unsrtd_weights) + 1 == len(srtd_mus)
        srtd_weights[:prior_pos] = unsrtd_weights[order[:prior_pos]]
        srtd_weights[prior_pos] = prior_weight
        srtd_weights[prior_pos + 1:] = unsrtd_weights[order[prior_pos:]]
    else:
        srtd_weights = np.ones(len(srtd_mus))
        srtd_weights[prior_pos] = prior_weight
    # -- magic formula:
    maxsigma = prior_sigma / 1.0
    minsigma = prior_sigma / min(100.0, (1.0 + len(srtd_mus)))
    #print 'maxsigma, minsigma', maxsigma, minsigma
    sigma = np.clip(sigma, minsigma, maxsigma)
    sigma[prior_pos] = prior_sigma
    assert prior_sigma > 0
    assert maxsigma > 0
    assert minsigma > 0
    assert np.all(sigma > 0), (sigma.min(), minsigma, maxsigma)
    #print weights.dtype
    srtd_weights /= srtd_weights.sum()
    if 0:
        print 'WEIGHTS', srtd_weights
        print 'MUS', srtd_mus
        print 'SIGMA', sigma
    return srtd_weights, srtd_mus, sigma
#
# Adaptive Parzen Samplers
# These produce conditional estimators for various prior distributions
#
# -- Uniform
@adaptive_parzen_sampler('uniform')
def ap_uniform_sampler(obs, prior_weight, low, high, size=(), rng=None):
    """Posterior sampler for a uniform prior: a GMM truncated to
    [low, high] whose prior component is a wide Gaussian centered on
    the interval."""
    mid = 0.5 * (high + low)
    width = 1.0 * (high - low)
    weights, mus, sigmas = scope.adaptive_parzen_normal(
        obs, prior_weight, mid, width)
    return scope.GMM1(weights, mus, sigmas, low=low, high=high, q=None,
                      size=size, rng=rng)
@adaptive_parzen_sampler('quniform')
def ap_quniform_sampler(obs, prior_weight, low, high, q, size=(), rng=None):
    """Posterior sampler for a quantized uniform prior: a truncated GMM
    quantized to multiples of q."""
    mid = 0.5 * (high + low)
    width = 1.0 * (high - low)
    weights, mus, sigmas = scope.adaptive_parzen_normal(
        obs, prior_weight, mid, width)
    return scope.GMM1(weights, mus, sigmas, low=low, high=high, q=q,
                      size=size, rng=rng)
@adaptive_parzen_sampler('loguniform')
def ap_loguniform_sampler(obs, prior_weight, low, high,
                          size=(), rng=None):
    """Posterior sampler for a log-uniform prior: the Parzen estimator
    is fit to log(obs) and samples come from the log-GMM."""
    mid = 0.5 * (high + low)
    width = 1.0 * (high - low)
    weights, mus, sigmas = scope.adaptive_parzen_normal(
        scope.log(obs), prior_weight, mid, width)
    return scope.LGMM1(weights, mus, sigmas, low=low, high=high,
                       size=size, rng=rng)
@adaptive_parzen_sampler('qloguniform')
def ap_qloguniform_sampler(obs, prior_weight, low, high, q,
                           size=(), rng=None):
    """Posterior sampler for a quantized log-uniform prior: fit the
    Parzen estimator in log space, sample from the quantized log-GMM."""
    prior_mu = 0.5 * (high + low)
    prior_sigma = 1.0 * (high - low)
    weights, mus, sigmas = scope.adaptive_parzen_normal(
        scope.log(
            # -- map observations that were quantized to be below exp(low)
            # (particularly 0) back up to exp(low) where they will
            # interact in a reasonable way with the AdaptiveParzen
            # thing.
            scope.maximum(
                obs,
                scope.maximum(  # -- protect against exp(low) underflow
                    EPS,
                    scope.exp(low)))),
        prior_weight, prior_mu, prior_sigma)
    return scope.LGMM1(weights, mus, sigmas, low, high, q=q,
                       size=size, rng=rng)
# -- Normal
@adaptive_parzen_sampler('normal')
def ap_normal_sampler(obs, prior_weight, mu, sigma, size=(), rng=None):
    """Posterior sampler for a normal prior: an untruncated GMM."""
    weights, mus, sigmas = scope.adaptive_parzen_normal(
        obs, prior_weight, mu, sigma)
    return scope.GMM1(weights, mus, sigmas, size=size, rng=rng)
@adaptive_parzen_sampler('qnormal')
def ap_qnormal_sampler(obs, prior_weight, mu, sigma, q, size=(), rng=None):
    """Posterior sampler for a quantized normal prior: a GMM quantized
    to multiples of q."""
    weights, mus, sigmas = scope.adaptive_parzen_normal(
        obs, prior_weight, mu, sigma)
    return scope.GMM1(weights, mus, sigmas, q=q, size=size, rng=rng)
@adaptive_parzen_sampler('lognormal')
def ap_loglognormal_sampler(obs, prior_weight, mu, sigma, size=(), rng=None):
    """Posterior sampler for a lognormal prior: fit the Parzen estimator
    to log(obs) and sample from the log-GMM."""
    weights, mus, sigmas = scope.adaptive_parzen_normal(
        scope.log(obs), prior_weight, mu, sigma)
    return scope.LGMM1(weights, mus, sigmas, size=size, rng=rng)
@adaptive_parzen_sampler('qlognormal')
def ap_qlognormal_sampler(obs, prior_weight, mu, sigma, q, size=(), rng=None):
    """Posterior sampler for a quantized lognormal prior.

    Observations are clipped up to EPS before the log so that quantized
    zeros do not produce -inf.
    """
    clipped = scope.maximum(obs, EPS)
    weights, mus, sigmas = scope.adaptive_parzen_normal(
        scope.log(clipped), prior_weight, mu, sigma)
    return scope.LGMM1(weights, mus, sigmas, q=q, size=size, rng=rng)
# -- Categorical
@adaptive_parzen_sampler('randint')
def ap_categorical_sampler(obs, prior_weight, upper,
                           size=(), rng=None, LF=DEFAULT_LF):
    """Posterior sampler for 'randint': count observed values (with
    linear forgetting), add prior pseudocounts, and sample from the
    resulting categorical distribution.

    NOTE(review): this module-level name is redefined by the
    'categorical' sampler below; the registration in
    adaptive_parzen_samplers is unaffected.
    """
    weights = scope.linear_forgetting_weights(scope.len(obs), LF=LF)
    counts = scope.bincount(obs, minlength=upper, weights=weights)
    # -- add in some prior pseudocounts
    pseudocounts = counts + prior_weight
    return scope.categorical(pseudocounts / scope.sum(pseudocounts),
                             upper=upper, size=size, rng=rng)
# @adaptive_parzen_sampler('categorical')
# def ap_categorical_sampler(obs, prior_weight, p, upper, size=(), rng=None,
# LF=DEFAULT_LF):
# return scope.categorical(p, upper, size=size, rng
# =rng)
@scope.define
def tpe_cat_pseudocounts(counts, upper, prior_weight, p, size):
    """Blend observed category *counts* with prior pseudocounts derived
    from probability vector *p*, normalized to a probability vector.

    Returns [] when no samples are requested.
    """
    if size == 0 or np.prod(size) == 0:
        return []
    if p.ndim == 2:
        # all rows are expected to carry the same prior probabilities
        assert np.all(p == p[0])
        p = p[0]
    boosted = counts + upper * (prior_weight * p)
    return boosted / np.sum(boosted)
@adaptive_parzen_sampler('categorical')
def ap_categorical_sampler(obs, prior_weight, p, upper=None,
                           size=(), rng=None, LF=DEFAULT_LF):
    """Posterior sampler for 'categorical': observed counts (with linear
    forgetting) blended with prior probabilities *p* via
    tpe_cat_pseudocounts, then sampled as a categorical."""
    weights = scope.linear_forgetting_weights(scope.len(obs), LF=LF)
    counts = scope.bincount(obs, minlength=upper, weights=weights)
    pseudocounts = scope.tpe_cat_pseudocounts(counts, upper, prior_weight, p, size)
    return scope.categorical(pseudocounts, upper=upper, size=size, rng=rng)
#
# Posterior clone performs symbolic inference on the pyll graph of priors.
#
@scope.define_info(o_len=2)
def ap_filter_trials(o_idxs, o_vals, l_idxs, l_vals, gamma,
                     gamma_cap=DEFAULT_LF):
    """Return the elements of o_vals that correspond to trials whose losses
    were above gamma, or below gamma.

    Returns (below, above) as a pair of arrays.
    """
    o_idxs, o_vals, l_idxs, l_vals = map(np.asarray, [o_idxs, o_vals, l_idxs,
                                                      l_vals])
    # XXX if this is working, refactor this sort for efficiency
    # Splitting is done this way to cope with duplicate loss values.
    # number of "good" trials: gamma * sqrt(#trials), capped at gamma_cap
    n_below = min(int(np.ceil(gamma * np.sqrt(len(l_vals)))), gamma_cap)
    l_order = np.argsort(l_vals)
    # ids of the trials with the n_below smallest losses
    keep_idxs = set(l_idxs[l_order[:n_below]])
    below = [v for i, v in zip(o_idxs, o_vals) if i in keep_idxs]
    if 0:
        print 'DEBUG: thresh', l_vals[l_order[:n_below]]
    keep_idxs = set(l_idxs[l_order[n_below:]])
    above = [v for i, v in zip(o_idxs, o_vals) if i in keep_idxs]
    #print 'AA0', below
    #print 'AA1', above
    return np.asarray(below), np.asarray(above)
def build_posterior(specs, prior_idxs, prior_vals, obs_idxs, obs_vals,
                    oloss_idxs, oloss_vals, oloss_gamma, prior_weight):
    """
    This method clones a posterior inference graph by iterating forward in
    topological order, and replacing prior random-variables (prior_vals) with
    new posterior distributions that make use of observations (obs_vals).

    Returns (post_specs, post_idxs, post_vals): the cloned graph nodes
    keyed like the prior ones.
    """
    assert all(isinstance(arg, pyll.Apply)
               for arg in [oloss_idxs, oloss_vals, oloss_gamma])
    expr = pyll.as_apply([specs, prior_idxs, prior_vals])
    nodes = pyll.dfs(expr)
    # build the joint posterior distribution as the values in this memo
    memo = {}
    # map prior RVs to observations
    obs_memo = {}
    for nid in prior_vals:
        # construct the leading args for each call to adaptive_parzen_sampler
        # which will permit the "adaptive parzen samplers" to adapt to the
        # correct samples.
        obs_below, obs_above = scope.ap_filter_trials(
            obs_idxs[nid], obs_vals[nid],
            oloss_idxs, oloss_vals, oloss_gamma)
        obs_memo[prior_vals[nid]] = [obs_below, obs_above]
    for node in nodes:
        if node not in memo:
            new_inputs = [memo[arg] for arg in node.inputs()]
            if node in obs_memo:
                # -- this case corresponds to an observed Random Var
                # node.name is a distribution like "normal", "randint", etc.
                obs_below, obs_above = obs_memo[node]
                aa = [memo[a] for a in node.pos_args]
                fn = adaptive_parzen_samplers[node.name]
                b_args = [obs_below, prior_weight] + aa
                named_args = [[kw, memo[arg]]
                              for (kw, arg) in node.named_args]
                b_post = fn(*b_args, **dict(named_args))
                a_args = [obs_above, prior_weight] + aa
                a_post = fn(*a_args, **dict(named_args))
                assert a_post.name == b_post.name
                fn_lpdf = getattr(scope, a_post.name + '_lpdf')
                #print fn_lpdf
                a_kwargs = dict([(n, a) for n, a in a_post.named_args
                                 if n not in ('rng', 'size')])
                b_kwargs = dict([(n, a) for n, a in b_post.named_args
                                 if n not in ('rng', 'size')])
                # calculate the llik of b_post under both distributions:
                # candidates drawn from the "below" model are scored under
                # both, and broadcast_best keeps the argmax of the
                # difference.
                below_llik = fn_lpdf(*([b_post] + b_post.pos_args), **b_kwargs)
                above_llik = fn_lpdf(*([b_post] + a_post.pos_args), **a_kwargs)
                #improvement = below_llik - above_llik
                #new_node = scope.broadcast_best(b_post, improvement)
                new_node = scope.broadcast_best(b_post, below_llik, above_llik)
            elif hasattr(node, 'obj'):
                # -- keep same literals in the graph
                new_node = node
            else:
                # -- this case is for all the other stuff in the graph
                new_node = node.clone_from_inputs(new_inputs)
            memo[node] = new_node
    post_specs = memo[specs]
    post_idxs = dict([(nid, memo[idxs])
                      for nid, idxs in prior_idxs.items()])
    post_vals = dict([(nid, memo[vals])
                      for nid, vals in prior_vals.items()])
    assert set(post_idxs.keys()) == set(post_vals.keys())
    assert set(post_idxs.keys()) == set(prior_idxs.keys())
    return post_specs, post_idxs, post_vals
@scope.define
def idxs_prod(full_idxs, idxs_by_label, llik_by_label):
    """Add all of the log-likelihoods together by id.

    Example arguments:
    full_idxs = [0, 1, ... N-1]
    idxs_by_label = {'node_a': [1, 3], 'node_b': [3]}
    llik_by_label = {'node_a': [0.1, -3.3], node_b: [1.0]}

    This would return N elements: [0, 0.1, 0, -2.3, 0, 0, ... ]
    """
    #print 'FULL IDXS'
    #print full_idxs
    assert len(set(full_idxs)) == len(full_idxs)
    full_idxs = list(full_idxs)
    rval = np.zeros(len(full_idxs))
    # position of each trial id within full_idxs, for O(1) accumulation
    pos_of_tid = dict(zip(full_idxs, range(len(full_idxs))))
    assert set(idxs_by_label.keys()) == set(llik_by_label.keys())
    for nid in idxs_by_label:
        idxs = idxs_by_label[nid]
        llik = llik_by_label[nid]
        # NOTE(review): ids are expected to be > 1 here -- presumably the
        # "fake id" offset used during sampling; confirm against suggest()
        assert np.all(np.asarray(idxs) > 1)
        assert len(set(idxs)) == len(idxs)
        assert len(idxs) == len(llik)
        for ii, ll in zip(idxs, llik):
            rval[pos_of_tid[ii]] += ll
            #rval[full_idxs.index(ii)] += ll
    return rval
@scope.define
def broadcast_best(samples, below_llik, above_llik):
    """Pick the candidate maximizing below_llik - above_llik and repeat
    it so the result has the same length as *samples*."""
    n = len(samples)
    if not n:
        return []
    improvement = below_llik - above_llik
    if n != len(improvement):
        raise ValueError()
    winner = samples[np.argmax(improvement)]
    return [winner] * n
# -- weight given to the prior component in each Parzen estimator
_default_prior_weight = 1.0
# -- suggest best of this many draws on every iteration
_default_n_EI_candidates = 24
# -- gamma * sqrt(n_trials) trials (capped at DEFAULT_LF) are treated as "good"
_default_gamma = 0.25
# -- fall back to random search until this many trials exist
_default_n_startup_jobs = 20
_default_linear_forgetting = DEFAULT_LF
def tpe_transform(domain, prior_weight, gamma):
    """Clone the domain's prior pyll graph into a TPE posterior graph.

    Returns (s_prior_weight, observed, observed_loss, specs, idxs, vals)
    where the 'observed*' literals are placeholders filled in by
    suggest() before evaluation.
    """
    s_prior_weight = pyll.Literal(float(prior_weight))
    # -- these dummy values will be replaced in suggest1() and never used
    observed = dict(
        idxs=pyll.Literal(),
        vals=pyll.Literal())
    observed_loss = dict(
        idxs=pyll.Literal(),
        vals=pyll.Literal())
    specs, idxs, vals = build_posterior(
        # -- vectorized clone of bandit template
        domain.vh.v_expr,
        # -- this dict and next represent prior dists
        domain.vh.idxs_by_label(),
        domain.vh.vals_by_label(),
        observed['idxs'],
        observed['vals'],
        observed_loss['idxs'],
        observed_loss['vals'],
        pyll.Literal(gamma),
        s_prior_weight
    )
    return (s_prior_weight, observed, observed_loss,
            specs, idxs, vals)
def suggest(new_ids, domain, trials, seed,
            prior_weight=_default_prior_weight,
            n_startup_jobs=_default_n_startup_jobs,
            n_EI_candidates=_default_n_EI_candidates,
            gamma=_default_gamma,
            linear_forgetting=_default_linear_forgetting,
            ):
    """Suggest one new trial document via the TPE algorithm.

    Expects exactly one id in *new_ids*; falls back to random search
    until n_startup_jobs trials exist.

    NOTE(review): linear_forgetting is accepted but never used in this
    body; the LF default is baked into the samplers at graph-build time.
    """
    new_id, = new_ids
    t0 = time.time()
    (s_prior_weight, observed, observed_loss, specs, opt_idxs, opt_vals) \
        = tpe_transform(domain, prior_weight, gamma)
    tt = time.time() - t0
    logger.info('tpe_transform took %f seconds' % tt)
    best_docs = dict()
    best_docs_loss = dict()
    # keep, per original trial id, the document with the lowest loss
    for doc in trials.trials:
        # get either this docs own tid or the one that it's from
        tid = doc['misc'].get('from_tid', doc['tid'])
        loss = domain.loss(doc['result'], doc['spec'])
        if loss is None:
            # -- associate infinite loss to new/running/failed jobs
            loss = float('inf')
        else:
            loss = float(loss)
        best_docs_loss.setdefault(tid, loss)
        if loss <= best_docs_loss[tid]:
            best_docs_loss[tid] = loss
            best_docs[tid] = doc
    tid_docs = best_docs.items()
    # -- sort docs by order of suggestion
    # so that linear_forgetting removes the oldest ones
    tid_docs.sort()
    losses = [best_docs_loss[k] for k, v in tid_docs]
    tids = [k for k, v in tid_docs]
    docs = [v for k, v in tid_docs]
    if docs:
        logger.info('TPE using %i/%i trials with best loss %f' % (
            len(docs), len(trials), min(best_docs_loss.values())))
    else:
        logger.info('TPE using 0 trials')
    if len(docs) < n_startup_jobs:
        # N.B. THIS SEEDS THE RNG BASED ON THE new_id
        return rand.suggest(new_ids, domain, trials, seed)
    # Sample and compute log-probability.
    if tids:
        # -- the +2 co-ordinates with an assertion above
        # to ensure that fake ids are used during sampling
        fake_id_0 = max(max(tids), new_id) + 2
    else:
        # -- weird - we're running the TPE algo from scratch
        assert n_startup_jobs <= 0
        fake_id_0 = new_id + 2
    fake_ids = range(fake_id_0, fake_id_0 + n_EI_candidates)
    # -- this dictionary will map pyll nodes to the values
    # they should take during the evaluation of the pyll program
    memo = {
        domain.s_new_ids: fake_ids,
        domain.s_rng: np.random.RandomState(seed),
    }
    o_idxs_d, o_vals_d = miscs_to_idxs_vals(
        [d['misc'] for d in docs], keys=domain.params.keys())
    memo[observed['idxs']] = o_idxs_d
    memo[observed['vals']] = o_vals_d
    memo[observed_loss['idxs']] = tids
    memo[observed_loss['vals']] = losses
    idxs, vals = pyll.rec_eval([opt_idxs, opt_vals], memo=memo,
                               print_node_on_error=False)
    # -- retrieve the best of the samples and form the return tuple
    # the build_posterior makes all specs the same
    rval_specs = [None] # -- specs are deprecated
    rval_results = [domain.new_result()]
    rval_miscs = [dict(tid=new_id, cmd=domain.cmd, workdir=domain.workdir)]
    miscs_update_idxs_vals(rval_miscs, idxs, vals,
                           idxs_map={fake_ids[0]: new_id},
                           assert_all_vals_used=False)
    rval_docs = trials.new_trial_docs([new_id],
                                      rval_specs, rval_results, rval_miscs)
    return rval_docs
| |
# Copyright (C) 2012, Christof Buchbender
# BSD License (License.txt)
import math
import os
import string
import sys
import ConfigParser
import numpy as np
from pysqlite2 import dbapi2 as sqlite
from scipy.ndimage import gaussian_filter
from generaltools import log_tools
import astrolyze.functions.constants as const
from astrolyze.functions import units
class Map(object):
    '''
    ``Map`` is the parent Class for the ``maps``-package. It contains all
    functions that are common to all supported map-formats, i.e. Fits,
    GILDAS and Miriad. This class is only supposed to be called through
    the FitsMap, GildasMap, and MiriadMap classes.

    Parameters
    ----------
    map_name : string
        The name and path of the file that is to be initialized to the maps
        package.

    Notes
    -----
    This is Python-2 code (``print`` statements, ``ConfigParser``,
    ``string.join``). The file name is expected to follow the naming
    convention ``prefix/source_telescope_species_fluxUnit_resolution[_comments].ending``.
    '''
    def __init__(self, map_name, **kwargs):
        '''
        Initialize a map to maps.

        Reads ``~/.astrolyze/astrolyze.cfg``, parses the map name into its
        naming-convention components, and loads source/line/calibration
        parameters from the sqlite ``parameter.db`` database.
        '''
        USER = os.getenv("USER")
        config_path = "/home/{}/.astrolyze/".format(USER)
        config_file = "astrolyze.cfg"
        self.config = ConfigParser.ConfigParser()
        self.config.read("{}{}".format(config_path, config_file))
        # Definition of the unit nomenclatures.
        self.log = log_tools.init_logger(
            directory="/home/{}/.astrolyze/".format(USER),
            name="astrolyze"
        )
        # NOTE(review): USER was already fetched above; this second call is
        # redundant.
        USER = os.getenv("USER")
        self.database_prefix = self.config.get("General", "database_prefix")
        self.database_prefix = self.database_prefix.format(USER)
        # Load configuration from config_file: each entry is a
        # comma-separated list of accepted unit spellings.
        self.jansky_beam_names = self.config.get(
            "Units", "jansky_beam_names"
        ).split(',')
        self.jansky_pixel_names = self.config.get(
            "Units", "jansky_pixel_names"
        ).split(',')
        self.tmb_names = self.config.get(
            "Units", "tmb_names"
        ).split(',')
        self.MJy_per_sterad_names = self.config.get(
            "Units", "MJy_per_sterad_names"
        ).split(',')
        self.erg_sec_pixel_names = self.config.get(
            "Units", "erg_sec_pixel_names"
        ).split(',')
        self.erg_sec_beam_names = self.config.get(
            "Units", "erg_sec_beam_names"
        ).split(',')
        self.erg_sec_sterad_names = self.config.get(
            "Units", "erg_sec_sterad_names"
        ).split(',')
        # Flat list of every unit string the package understands.
        self.known_units = (self.jansky_beam_names + self.jansky_pixel_names +
                            self.tmb_names + self.MJy_per_sterad_names +
                            self.erg_sec_beam_names +
                            self.erg_sec_sterad_names +
                            self.erg_sec_pixel_names)
        # Definition of possible data format endings for the different
        # programs.
        self.gildas_formats = self.config.get(
            "Formats", "gildas_formats"
        ).split(',')
        self.fits_formats = self.config.get(
            "Formats", "fits_formats"
        ).split(',')
        # Miriad maps are directories and carry no file ending.
        self.miriad_formats = ['',None]
        self.class_formats = self.config.get(
            "Formats", "class_formats"
        ).split(',')
        # name_convention is not needed anymore. Only kept for backward
        # compatibility.
        self.map_name = map_name
        # Test if the file exists. Directory for Miriad.
        # File for Fits and GILDAS.
        if ((not os.path.isdir(self.map_name)
                and not os.path.isfile(self.map_name))):
            self.log.critical(
                'Exiting: {} does not exist'.format(self.map_name)
            )
            raise SystemExit
        # Get Informations from the file name.
        self.map_nameList = map_name.split('/')[-1].split('_')
        if len(self.map_nameList) < 5:
            self.log.critical(
                "This map does not follow the Naming Convention for Astrolyze"
            )
            raise SystemExit
        # NOTE(review): duplicate of the split two statements above.
        self.map_nameList = map_name.split('/')[-1].split('_')
        self.prefix_list = map_name.split('/')[0:-1]
        self.comments = []
        self.source = self.map_nameList[0].split('/')[-1]
        # Re-join the directory components into a trailing-slash prefix.
        if len(self.prefix_list) > 0:
            self.prefix = string.join(self.prefix_list, '/') + '/'
        elif len(self.prefix_list) == 0:
            self.prefix = ''
        self.telescope = self.map_nameList[1]
        self.species = self._resolveSpecies()
        self.fluxUnit = self.map_nameList[3]
        # Check dataFormat: strip the recognised ending from the last
        # name component so only the resolution/comment part remains.
        if self.map_name.endswith('.fits'):
            self.dataFormat = 'fits'
            self.map_nameList[-1] = self.map_nameList[-1].replace(
                '.fits', ''
            )
        for i in self.gildas_formats:
            if self.map_name.endswith('.' + i):
                self.dataFormat = i
                self.map_nameList[-1] = self.map_nameList[-1].replace(
                    '.' + i, ''
                )
        for i in self.class_formats:
            if self.map_name.endswith('.' + i):
                self.dataFormat = i
                self.map_nameList[-1] = self.map_nameList[-1].replace(
                    '.' + i, ''
                )
        if os.path.isdir(self.map_name):
            # Miriad Data Format uses directories
            self.dataFormat = None
        self.resolution = self._resolveResolution()
        # Entries after the fifth are considered comments.
        if len(self.map_nameList) > 5:
            for i in range(len(self.map_nameList) - 6):
                self.comments += [self.map_nameList[i + 5]]
            self.comments += [self.map_nameList[-1]]
        #!!!!! TODO: Bad implementation. Try should only contain very short
        # parts of the program otherwise errors in the program are camouflaged.
        # Galaxy parameters for the current source; any failure (missing DB,
        # unknown source, ...) silently leaves every field at None.
        try:
            self.connection = sqlite.connect(str(self.database_prefix) +
                                             'parameter.db')
            self.cursor = self.connection.cursor()
            self.cursor.execute("SELECT * FROM Galaxies WHERE Name = ?",
                                (self.source.upper(),))
            self.params = self.cursor.fetchall()[0]
            self.type = self.params[2]
            self.distance = self.params[3]
            self.vlsr = self.params[4]
            self.centralPosition = self.params[5]
            self.pa = self.params[6]
            self.inclination = self.params[7]
            self.R25 = self.params[8]
            self.cursor.close()
            self.connection.close()
        except:
            self.params = None
            self.type = None
            self.distance = None
            self.vlsr = None
            self.centralPosition = None
            self.pa = None
            self.inclination = None
            self.R25 = None
        # Line parameters (frequency/wavelength) for the current species;
        # best-effort, may overwrite values set by _resolveSpecies().
        try:
            self.connection = sqlite.connect(str(self.database_prefix) +
                                             'parameter.db')
            self.cursor = self.connection.cursor()
            self.cursor.execute("SELECT * FROM Lines WHERE Name = ?",
                                (self.species.upper(),))
            self.params = self.cursor.fetchall()[0]
            self.frequency = self.params[2]
            self.wavelength = self.params[3]
            self.cursor.close()
            self.connection.close()
        except:
            pass
        # Calibration error for the telescope/species combination.
        try:
            self.connection = sqlite.connect(str(self.database_prefix) +
                                             'parameter.db')
            self.cursor = self.connection.cursor()
            self.cursor.execute("SELECT * FROM cal_error WHERE Telescope = "
                                " ? AND Species = ?", (self.telescope.upper(),
                                self.species.upper()))
            self.params = self.cursor.fetchall()[0]
            self.calibrationError = self.params[3]
            self.cursor.close()
            self.connection.close()
        except:
            self.calibrationError = np.nan
        self.get_beam_size()

    def _resolveSpecies(self):
        '''
        Gets the frequency from a database on basis of the map name if
        possible.

        Recognises continuum species strings carrying a unit suffix
        (``mum``, ``mm``, ``GHz``) and derives ``self.frequency`` and
        ``self.wavelength`` from them; otherwise both are set to NaN.
        Returns the raw species string.
        '''
        species = self.map_nameList[2]
        if 'mum' in species:
            try:
                # micrometres -> metres, then c / lambda.
                self.wavelength = float(species.replace('mum', '')) * 1e-6
                self.frequency = 299792356 / self.wavelength
            except:
                self.frequency = np.nan
                self.wavelength = np.nan
        elif 'mm' in species:
            try:
                self.wavelength = float(species.replace('mm', '')) * 1e-3
                self.frequency = 299792356 / self.wavelength
            except:
                self.frequency = np.nan
                self.wavelength = np.nan
        elif 'GHz' in species:
            try:
                self.frequency = float(species.replace('GHz', '')) * 1e9
                self.wavelength = 299792356 / self.frequency
            except:
                self.frequency = np.nan
                self.wavelength = np.nan
        else:
            # Spectral-line species: values are filled in later from the
            # Lines database table (if present).
            self.frequency = np.nan
            self.wavelength = np.nan
        return species

    def _resolveResolution(self):
        '''
        Reads the resolution string from the map name.

        Returns either the literal string ``'uk'`` (unknown) or a list
        ``[major, minor, position_angle]`` in arcsec/degrees parsed from
        patterns like ``"10"``, ``"10x5"``, ``"10a30"`` or ``"10x5a30"``.
        '''
        # TODO: include handling of 'uk'
        # NOTE(review): this local shadows the imported ``string`` module.
        string = self.map_nameList[4]
        # Test if there is a digit after the last point.
        # To exclude file endings like .fits.gdf.
        # In this the dataFormat would be 'gdf'.
        # but self.map_nameList[4] storing the resolution
        # Still does contain points only due to numbers.
        test = string.split('.')
        x = True
        print string
        while x:
            if 'uk' in string:
                break
            try:
                # Loop ends once the last dot-separated token starts with
                # a digit; otherwise strip trailing non-numeric endings.
                float(test[-1][:1])
                x = False
            except KeyboardInterrupt:
                sys.exit()
            except:
                string = string.replace('.{}'.format(test[-1]), '')
                test = test[0:-1]
        if 'uk' in string:
            return string
        # Resolve the resolution naming scheme explained above.
        if 'x' in string and 'a' in string:
            major = float(string.split('x')[0])
            minor = float(string.split('x')[1].split('a')[0])
            pa = float(string.split('x')[1].split('a')[1])
        if 'x' in string and 'a' not in string:
            major = float(string.split('x')[0])
            # NOTE(review): ``minor`` is never assigned in this branch
            # (``minorData`` looks like a typo) -> UnboundLocalError at the
            # return below for "MAJxMIN"-style strings.
            minorData = string.split('x')[1]
            pa = 0.0
        if 'x' not in string and 'a' in string:
            major = float(string.split('a')[0])
            minor = float(string.split('a')[0])
            # NOTE(review): ``pa`` is never assigned in this branch
            # (``paData`` looks like a typo) -> UnboundLocalError at return.
            paData = string.split('a')[1]
        if 'x' not in string and 'a' not in string:
            major = float(string)
            minor = float(string)
            pa = 0
        return [major, minor, pa]

    def resolutionToString(self, resolution=None):
        r""" Converts the resolution list to a string to be printed and
        included in the file names.

        Parameters
        ----------
        resolution : list or str, optional
            ``[major, minor, pa]``; defaults to ``self.resolution``. A
            string is returned unchanged (e.g. ``'uk'``).
        """
        if resolution is None:
            if float(self.resolution[2]) == 0.0:
                if float(self.resolution[0]) == float(self.resolution[1]):
                    string = "%1.2f" % self.resolution[0]
                if float(self.resolution[0]) != float(self.resolution[1]):
                    string = ("%1.2f" % self.resolution[0] + 'x' +
                              "%1.2f" % self.resolution[1])
            if float(self.resolution[2]) != 0.0:
                if float(self.resolution[0]) == float(self.resolution[1]):
                    string = ("%1.2f" % self.resolution[0] + 'a' +
                              "%1.1f" % self.resolution[2])
                if float(self.resolution[0]) != float(self.resolution[1]):
                    string = ("%1.2f" % self.resolution[0] + 'x' +
                              "%1.2f" % self.resolution[1] + 'a' +
                              "%1.1f" % self.resolution[2])
        if resolution is not None and type(resolution) is not str:
            if float(resolution[2]) == 0.0:
                if float(resolution[0]) == float(resolution[1]):
                    string = "%1.2f" % resolution[0]
                if float(resolution[0]) != float(resolution[1]):
                    string = ("%1.2f" % resolution[0] + 'x' +
                              "%1.2f" % resolution[1])
            if float(resolution[2]) != 0.0:
                if float(resolution[0]) == float(resolution[1]):
                    string = ("%1.2f" % resolution[0] + 'a' +
                              "%1.1f" % resolution[2])
                if float(resolution[0]) != float(resolution[1]):
                    string = ("%1.2f" % resolution[0] + 'x' +
                              "%1.2f" % resolution[1] + 'a' +
                              "%1.1f" % resolution[2])
        if type(resolution) is str:
            string = resolution
        return string

    def get_beam_size(self):
        r""" Calculates the beam-size in steradians and in m^2. For the latter
        the distance to the source has to be given.

        Returns
        -------
        Initialization if the variables:
        self.beamSizeSterad and self.beamSizeM2

        Notes
        -----
        The formula used is:
        .. math:
            \Omega = 1.133 * FWHM(rad)^2 \cdot (Distance(m)^2)
        """
        if self.resolution != 'uk':
            # 1.133 = pi / (4 ln 2), Gaussian beam solid angle factor;
            # const.a2r converts arcsec to radians.
            self.beamSizeSterad = (1.133 * const.a2r ** 2 * self.resolution[0]
                                   * self.resolution[1])
            if self.distance is not None:
                self.beamSizeM2 = (1.133 * (self.distance * const.a2r *
                                   const.pcInM) ** 2 *
                                   self.resolution[0] * self.resolution[1])
        else:
            # NOTE(review): sets ``beamSize`` while the branch above sets
            # ``beamSizeSterad``/``beamSizeM2`` — likely an inconsistency.
            self.beamSize = np.nan

    def change_map_name(self, source=None, telescope=None, species=None,
                        fluxUnit=None, resolution=None, comments=None,
                        dataFormat=False, prefix=None):
        '''
        This function can be used to change the names of the maps and make a
        copy of the file to the new name and/or location.

        Any keyword left at its default keeps the current value;
        ``dataFormat=False`` (not ``None``) means "keep", since ``None`` is
        a valid Miriad format.
        '''
        source = source or self.source
        telescope = telescope or self.telescope
        species = species or self.species
        fluxUnit = fluxUnit or self.fluxUnit
        resolution = resolution or self.resolution
        if dataFormat is False:
            dataFormat = self.dataFormat
        prefix = prefix or self.prefix
        if comments is None:
            comments = comments or self.comments
        # Now update the variables
        self.source = source
        self.telescope = telescope
        self.species = species
        self.fluxUnit = fluxUnit
        self.resolution = resolution
        if dataFormat is not False:
            self.dataFormat = dataFormat
        self.prefix = prefix
        # NOTE(review): when ``comments`` is None it was set to
        # ``self.comments`` above, so these two lines append the existing
        # comments to themselves (duplication bug).
        if comments is not None:
            comments = self.comments + comments
        self.comments = self.comments + comments
        # NOTE(review): missing ``self.`` — this raises NameError at
        # runtime; should be ``self._comment_to_string()``.
        comment_string = _comment_to_string()
        target_file_name = "{}{}_{}_{}_{}_{}{}.{}".format(
            prefix,
            source, telescope, species, fluxUnit,
            self.resolutionToString(self.resolution),
            comment_string,
            dataFormat
        )
        if len(self.comments) == 0:
            if str(self.map_name) != (str(prefix) + str(source) + '_' +
                                      str(telescope) + '_' + str(species) +
                                      '_' + str(fluxUnit) + '_' +
                                      str(resolution) + '.' +
                                      str(dataFormat)):
                copy_command = "cp {} {}".format(self.map_name,
                                                 target_file_name)
                # NOTE(review): ``subprocess`` is not imported at the top of
                # this module -> NameError when this path is taken.
                subprocess.call(copy_command, shell=True)
                self.map_name = target_file_name
        if len(self.comments) != 0:
            if ((str(self.map_name) != str(prefix) + str(source) + '_' +
                 str(telescope) + '_' + str(species) + '_' + str(fluxUnit) +
                 '_' + str(resolution) + '_' + '_'.join(self.comments) + '.' +
                 str(dataFormat))):
                os.system('cp ' + str(self.map_name) + ' ' + str(prefix) +
                          str(source) + '_' + str(telescope) + '_' +
                          str(species) + '_' + str(fluxUnit) + '_' +
                          self.resolutionToString(self.resolution) + '_' +
                          '_'.join(self.comments) + '.' + str(dataFormat))
                self.map_name = (str(prefix) + str(source) + '_' +
                                 str(telescope) + '_' + str(species) + '_' +
                                 str(fluxUnit) + '_' +
                                 self.resolutionToString(self.resolution) +
                                 '_' + '_'.join(self.comments) +
                                 '.' + str(dataFormat))

    def _comment_to_string(self):
        """ Converts the comment list to a string.

        Returns ``""`` for no comments, otherwise ``"_c1_c2..."`` with a
        leading underscore so it can be appended directly to a file name.
        """
        comment_string = "_".join(self.comments)
        if comment_string != "":
            comment_string = "_{}".format(comment_string)
        return comment_string

    def returnName(self, source=None, telescope=None, species=None,
                   fluxUnit=None, resolution=None, comments=None,
                   dataFormat=False, prefix=None):
        '''
        Returns the Name corresponding to the Name convention. Single keywords
        can be changed.
        This function is useful to generate a writeout name for a changed file
        without overwriting the current ``self.map_name``.

        Parameters
        ----------
        All possible parameters from the "Naming Convention" plus the new
        prefix.
        '''
        source = source or self.source
        telescope = telescope or self.telescope
        species = species or self.species
        fluxUnit = fluxUnit or self.fluxUnit
        resolution = resolution or self.resolution
        prefix = prefix or self.prefix
        if dataFormat is False:
            dataFormat = self.dataFormat
        if comments is None:
            comments = self.comments
        elif comments is not None:
            comments = self.comments + comments
        # dataFormat None (Miriad) -> no file ending is appended.
        if len(comments) == 0:
            if dataFormat is not None:
                return (str(prefix) + str(source) + '_' + str(telescope) +
                        '_' + str(species) + '_' + str(fluxUnit) + '_' +
                        self.resolutionToString(resolution) + '.' +
                        str(dataFormat))
            if dataFormat is None:
                return (str(prefix) + str(source) + '_' + str(telescope) +
                        '_' + str(species) + '_' + str(fluxUnit) + '_' +
                        self.resolutionToString(resolution))
        if len(comments) != 0:
            if dataFormat is not None:
                return (str(prefix) + str(source) + '_' + str(telescope) +
                        '_' + str(species) + '_' + str(fluxUnit) + '_' +
                        self.resolutionToString(resolution) + '_' +
                        '_'.join(comments) + '.' + str(dataFormat))
            if dataFormat is None:
                return (str(prefix) + str(source) + '_' + str(telescope) +
                        '_' + str(species) + '_' + str(fluxUnit) + '_' +
                        self.resolutionToString(resolution) + '_' +
                        '_'.join(comments))

    def flux_conversion(self, x=None, major=None, minor=None,
                        nu_or_lambda='nu', direction=None):
        r"""
        Calulates conversion between K.km/s and Jy/beam and vise versa.

        Parameters
        ----------
        x : float [GHz]
            Wavelength/frequency. Defaults to the frequency of the loaded map,
            i.e. self.frequency
        major : float
            Major Axis Beam (arcsec). Default None, i.e. using self.resolution.
        minor : float
            Minor Axis Beam(arcsec). Default None, i.e. using self.resolution.
        nu_or_lambda : string
            Choose type of x: frequency = ``'nu'`` or wavelength =
            ``'lambda'``.
        direction : string
            choose conversion direction ``'kelvin_to_jansky'``
            means Kelvin to Jansky; ``'jansky_to_kelvin'`` Jansky to Kelvin.

        Notes
        -----
        If self.frequency and self.resolution are correctly set, this functions
        does not need any input. Otherwise this has to be given explicitly.
        """
        # NOTE(review): with ``or`` this condition is True for EVERY
        # non-None direction (a value can't equal both strings), so the
        # warning is printed even for valid input; should be ``and``.
        if direction is not None and (direction != 'kelvin_to_jansky'
                                      or direction != 'jansky_to_kelvin'):
            print ('Keyword Error direction has to be kelvin_to_jansky or'
                   'jansky_to_kelvin -> Exit!')
        # Infer the direction from the map's flux unit when not given.
        if self.fluxUnit in ['JyB', 'Jy/Beam'] and direction is None:
            direction = 'jansky_to_kelvin'
        if self.fluxUnit in ['Tmb', 'T', 'Kkms'] and direction is None:
            direction = 'kelvin_to_jansky'
        print self.fluxUnit
        if ((self.fluxUnit not in ['JyB', 'Jy/Beam'] and self.fluxUnit
             not in ['Tmb', 'T', 'Kkms'])):
            print ('Map is not in the right units has to be Jy/beam or '
                   'Kelvin something. -> Exit!')
            sys.exit()
        # Build the converter closure; NOTE(review): if ``direction``/
        # ``nu_or_lambda`` match no branch, ``fcon`` is unbound at the
        # return below.
        if nu_or_lambda == 'lambda':
            if direction == 'jansky_to_kelvin':
                def fcon(x, major, minor):
                    return units.jansky_to_kelvin(x, major,
                                                  minor, nu_or_lambda='lambda')
            if direction == 'kelvin_to_jansky':
                def fcon(x, major, minor):
                    return units.kelvin_to_jansky(x, major,
                                                  minor, nu_or_lambda='lambda')
        if nu_or_lambda == 'nu':
            # NOTE(review): these closures ignore their ``frequency``
            # parameter and close over the outer ``x`` instead; behaviour
            # only coincides because ``x`` is rebound before the call.
            if direction == 'jansky_to_kelvin':
                def fcon(frequency, major, minor):
                    return units.jansky_to_kelvin(x, major,
                                                  minor, nu_or_lambda='nu')
            if direction == 'kelvin_to_jansky':
                def fcon(frequency, major, minor):
                    return units.kelvin_to_jansky(x, major,
                                                  minor, nu_or_lambda='nu')
        if x is None:
            # Identity test against np.nan works here because __init__
            # assigns the very same np.nan object.
            if self.frequency is not np.nan:
                x = self.frequency / 1e9
            elif self.frequency is np.nan:
                print 'No frequency information present. Can not proceed.'
        if major is None:
            major = self.resolution[0]
        if minor is None:
            minor = self.resolution[1]
        return fcon(x, major, minor)
| |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2017 The UltimateOnlineCash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoind with different proxy configuration.
Test plan:
- Start bitcoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on bitcoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create bitcoinds that connect to them
- Manipulate the bitcoinds using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
class ProxyTest(BitcoinTestFramework):
    """Exercise bitcoind's -proxy/-onion/-proxyrandomize options against
    local SOCKS5 proxy servers and verify the connection commands they
    receive, plus the proxy fields reported by `getnetworkinfo`."""

    def set_test_params(self):
        # One node per proxy configuration under test (see setup_nodes).
        self.num_nodes = 4

    def setup_nodes(self):
        self.have_ipv6 = test_ipv6_local()
        # Create two proxies on different ports
        # ... one unauthenticated
        self.conf1 = Socks5Configuration()
        # Derive the port from the pid so parallel test runs don't collide.
        self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
        self.conf1.unauth = True
        self.conf1.auth = False
        # ... one supporting authenticated and unauthenticated (Tor)
        self.conf2 = Socks5Configuration()
        self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
        self.conf2.unauth = True
        self.conf2.auth = True
        if self.have_ipv6:
            # ... one on IPv6 with similar configuration
            self.conf3 = Socks5Configuration()
            self.conf3.af = socket.AF_INET6
            self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
            self.conf3.unauth = True
            self.conf3.auth = True
        else:
            self.log.warning("Testing without local IPv6 support")
        self.serv1 = Socks5Server(self.conf1)
        self.serv1.start()
        self.serv2 = Socks5Server(self.conf2)
        self.serv2.start()
        if self.have_ipv6:
            self.serv3 = Socks5Server(self.conf3)
            self.serv3.start()
        # Note: proxies are not used to connect to local nodes
        # this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
        args = [
            ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
            ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
            ['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
            []
        ]
        if self.have_ipv6:
            # Node 3 only runs when IPv6 is available: -proxy via the IPv6
            # proxy, onions disabled.
            args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
        self.add_nodes(self.num_nodes, extra_args=args)
        self.start_nodes()

    def node_test(self, node, proxies, auth, test_onion=True):
        """Make *node* attempt one outgoing connection per destination type
        (IPv4, IPv6, onion, DNS name) and assert each lands on the expected
        proxy in *proxies* (a list of four Socks5Server instances, indexed
        per destination type).

        auth: when False, assert the SOCKS5 handshake carried no credentials.
        Returns the list of Socks5Command objects received.
        """
        rv = []
        # Test: outgoing IPv4 connection through node
        node.addnode("15.61.23.23:1234", "onetry")
        cmd = proxies[0].queue.get()
        assert(isinstance(cmd, Socks5Command))
        # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
        assert_equal(cmd.addr, b"15.61.23.23")
        assert_equal(cmd.port, 1234)
        if not auth:
            assert_equal(cmd.username, None)
            assert_equal(cmd.password, None)
        rv.append(cmd)
        if self.have_ipv6:
            # Test: outgoing IPv6 connection through node
            node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
            cmd = proxies[1].queue.get()
            assert(isinstance(cmd, Socks5Command))
            # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
            assert_equal(cmd.atyp, AddressType.DOMAINNAME)
            assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
            assert_equal(cmd.port, 5443)
            if not auth:
                assert_equal(cmd.username, None)
                assert_equal(cmd.password, None)
            rv.append(cmd)
        if test_onion:
            # Test: outgoing onion connection through node
            node.addnode("bitcoinostk4e4re.onion:4771", "onetry")
            cmd = proxies[2].queue.get()
            assert(isinstance(cmd, Socks5Command))
            assert_equal(cmd.atyp, AddressType.DOMAINNAME)
            assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
            assert_equal(cmd.port, 4771)
            if not auth:
                assert_equal(cmd.username, None)
                assert_equal(cmd.password, None)
            rv.append(cmd)
        # Test: outgoing DNS name connection through node
        node.addnode("node.noumenon:4771", "onetry")
        cmd = proxies[3].queue.get()
        assert(isinstance(cmd, Socks5Command))
        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
        assert_equal(cmd.addr, b"node.noumenon")
        assert_equal(cmd.port, 4771)
        if not auth:
            assert_equal(cmd.username, None)
            assert_equal(cmd.password, None)
        rv.append(cmd)
        return rv

    def run_test(self):
        # basic -proxy
        self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
        # -proxy plus -onion
        self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
        # -proxy plus -onion, -proxyrandomize
        rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
        # Check that credentials as used for -proxyrandomize connections are unique
        credentials = set((x.username,x.password) for x in rv)
        assert_equal(len(credentials), len(rv))
        if self.have_ipv6:
            # proxy on IPv6 localhost
            self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)

        def networks_dict(d):
            # Index the getnetworkinfo 'networks' array by network name.
            r = {}
            for x in d['networks']:
                r[x['name']] = x
            return r

        # test RPC getnetworkinfo
        n0 = networks_dict(self.nodes[0].getnetworkinfo())
        for net in ['ipv4','ipv6','onion']:
            assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
            assert_equal(n0[net]['proxy_randomize_credentials'], True)
        assert_equal(n0['onion']['reachable'], True)
        n1 = networks_dict(self.nodes[1].getnetworkinfo())
        for net in ['ipv4','ipv6']:
            assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
            assert_equal(n1[net]['proxy_randomize_credentials'], False)
        assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
        assert_equal(n1['onion']['proxy_randomize_credentials'], False)
        assert_equal(n1['onion']['reachable'], True)
        n2 = networks_dict(self.nodes[2].getnetworkinfo())
        for net in ['ipv4','ipv6','onion']:
            assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
            assert_equal(n2[net]['proxy_randomize_credentials'], True)
        assert_equal(n2['onion']['reachable'], True)
        if self.have_ipv6:
            n3 = networks_dict(self.nodes[3].getnetworkinfo())
            for net in ['ipv4','ipv6']:
                assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
                assert_equal(n3[net]['proxy_randomize_credentials'], False)
            # Onions were routed via -onion on other nodes; node 3 runs
            # with -noonion, so the network must be unreachable.
            assert_equal(n3['onion']['reachable'], False)
# Standard functional-test entry point: run the test when invoked directly.
if __name__ == '__main__':
    ProxyTest().main()
| |
import sys
import time
from datetime import date, datetime
from decimal import Decimal
# Python 2/3 compatibility shim: ``basestring`` exists only on Python 2.
# On Python 3 the name probe raises NameError and ``str`` is used instead
# for the isinstance() checks in OrientRecord below.
try:
    basestring
except NameError:
    basestring = str
class OrientRecord(object):
    """
    Object that represent an Orient Document / Record.

    Field values live in the private ``__o_storage`` dict; record identity
    (``@rid``), version and class are kept separately and exposed through
    the ``_rid`` / ``_version`` / ``_class`` properties.
    """

    # Public read-only view of the record's field storage.
    oRecordData = property(lambda self: self.__o_storage)

    def __str__(self):
        # Render as a pseudo-dict string, e.g.
        # "{'@MyClass':{...},'version':3,'rid':'#12:0'}".
        rep = ""
        if self.__o_storage:
            rep = str( self.__o_storage )
        if self.__o_class is not None:
            rep = "'@" + str(self.__o_class) + "':" + rep + ""
        if self.__version is not None:
            rep = rep + ",'version':" + str(self.__version)
        if self.__rid is not None:
            rep = rep + ",'rid':'" + str(self.__rid) + "'"
        return '{' + rep + '}'

    @staticmethod
    def addslashes(string):
        # Escape backslash first so already-added escapes are not doubled.
        l = [ "\\", '"', "'", "\0", ]
        for i in l:
            if i in string:
                string = string.replace( i, '\\' + i )
        return string

    def __init__(self, content=None):
        """Build a record from a dict.

        Recognised keys: ``__rid``, ``__version``, ``__o_class``,
        ``__o_storage``, and ``'@ClassName'`` mapping to a dict of fields
        (string values get escaped via addslashes); any other key becomes
        a plain field.
        """
        self.__rid = None
        self.__version = None
        self.__o_class = None
        self.__o_storage = {}
        if not content:
            content = {}
        for key in content.keys():
            if key == '__rid':  # Ex: select @rid, field from v_class
                self.__rid = content[ key ]
                # self.__rid = OrientRecordLink( content[ key ][ 1: ] )
            elif key == '__version':  # Ex: select @rid, @version from v_class
                self.__version = content[key]
            elif key == '__o_class':
                self.__o_class = content[ key ]
            elif key[0:1] == '@':
                # special case dict
                # { '@my_class': { 'accommodation': 'hotel' } }
                self.__o_class = key[1:]
                for _key, _value in content[key].items():
                    if isinstance(_value, basestring):
                        self.__o_storage[_key] = self.addslashes( _value )
                    else:
                        self.__o_storage[_key] = _value
            elif key == '__o_storage':
                self.__o_storage = content[key]
            else:
                self.__o_storage[key] = content[key]

    def _set_keys(self, content=dict):
        # NOTE(review): this looks broken — the default is the ``dict``
        # *type*, nothing is ever stored, and it recurses on values that
        # may not be dicts. Intended semantics unclear; verify before use.
        for key in content.keys():
            self._set_keys( content[key] )

    @property
    def _in(self):
        # Edge "in" link, or None for non-edge records.
        try:
            return self.__o_storage['in']
        except KeyError:
            return None

    @property
    def _out(self):
        # Edge "out" link, or None for non-edge records.
        try:
            return self.__o_storage['out']
        except KeyError:
            return None

    @property
    def _rid(self):
        return self.__rid

    @property
    def _version(self):
        return self.__version

    @property
    def _class(self):
        return self.__o_class

    def update(self, **kwargs):
        # Refresh identity metadata; rid/version are overwritten (or reset
        # to None when absent), class only filled in if not yet known.
        self.__rid = kwargs.get('__rid', None)
        self.__version = kwargs.get('__version', None)
        if self.__o_class is None:
            self.__o_class = kwargs.get('__o_class', None)

    """ This method is for backward compatibility when someone
        use 'getattr(record, a_key)' """
    def __getattr__(self, item):
        """
        Fall back to field storage for attribute access (backward
        compatibility for ``getattr(record, a_key)``).

        :param item: string
        :return: mixed
        :raise: AttributeError
        """
        try:
            return self.__o_storage[item]
        except KeyError:
            raise AttributeError( "'OrientRecord' object has no attribute "
                                  "'" + item + "'" )
class OrientRecordLink(object):
    """Lightweight handle for an OrientDB record id string (``"cluster:pos"``)."""

    def __init__(self, recordlink):
        # A record id is "<cluster>:<position>"; keep the raw string as
        # well as its two components.
        cluster, position = recordlink.split(":")
        self.__link = recordlink
        self.clusterID = cluster
        self.recordPosition = position

    def __str__(self):
        return self.get_hash()

    def get(self):
        """Return the raw record id without the leading '#'."""
        return self.__link

    def get_hash(self):
        """Return the record id in OrientDB '#cluster:pos' notation."""
        return "#" + self.__link
class OrientBinaryObject(object):
    """
    Base64-encoded binary payload (this will be a RidBag).
    """

    def __init__(self, stri):
        # Store the payload in its base64 text form.
        self.b64 = stri

    def get_hash(self):
        """Return the payload wrapped in underscores, as serialized."""
        return "_%s_" % self.b64

    def getBin(self):
        """Decode and return the raw bytes of the payload."""
        import base64
        return base64.b64decode(self.b64)
class OrientCluster(object):
    """Information regarding a cluster on the Orient server."""

    def __init__(self, name, cluster_id, cluster_type=None, segment=None):
        """
        :param name: str name of the cluster
        :param cluster_id: int id of the cluster
        :param cluster_type: cluster type (only for protocol versions < 24)
        :param segment: cluster segment (only for protocol versions < 24)
        """
        #: str name of the cluster
        self.name = name
        #: int id of the cluster
        self.id = cluster_id
        self.type = cluster_type
        self.segment = segment

    def __str__(self):
        return "%s: %d" % (self.name, self.id)

    def __eq__(self, other):
        # Identity of a cluster is its (name, id) pair.
        return (self.name, self.id) == (other.name, other.id)

    def __ne__(self, other):
        return not self.__eq__(other)
class OrientVersion(object):
    """Object representing an OrientDB release version."""

    def __init__(self, release):
        """
        :param release: release string (or bytes), e.g. ``"2.1.3"``,
            ``"1.7-rc2"`` or ``"3.0.4 GA"``.
        """
        #: string full OrientDB release
        self.release = release
        #: Major version (int once parsed)
        self.major = None
        #: Minor version (int once parsed)
        self.minor = None
        #: build number (int) or build tag (str)
        self.build = None
        self._parse_version(release)

    def _parse_version(self, string_release):
        """Split *string_release* into major / minor / build.

        Handles ``"X"``, ``"X.Y"``, ``"X.Y-tag"`` and ``"X.Y.Z[ extra]"``;
        components missing from the string are left at ``None``.
        """
        if not isinstance(string_release, str):
            string_release = string_release.decode()
        try:
            version_info = string_release.split( "." )
            self.major = int( version_info[0] )
            self.minor = version_info[1]
            self.build = version_info[2]
        except IndexError:
            # Short releases ("2", "2.1") simply stop here with the
            # remaining components unset.
            pass
        # BUG FIX: the post-processing below used to run unconditionally
        # and raised TypeError/AttributeError when minor or build were
        # missing (e.g. release "2" or "2.1"); guard on presence instead.
        if self.minor is not None:
            if "-" in self.minor:
                # "7-rc2" style: the part after the dash is the build tag.
                _temp = self.minor.split( "-" )
                self.minor = int( _temp[0] )
                self.build = _temp[1]
            else:
                self.minor = int( self.minor )
        if self.build is not None:
            # Drop any trailing annotation ("4 GA" -> "4") and convert to
            # int when possible, otherwise keep the tag as a string.
            build = self.build.split( " ", 1 )[0]
            try:
                build = int( build )
            except ValueError:
                pass
            self.build = build

    def __str__(self):
        return self.release
class OrientNode(object):
    """Represent a server node in a multi-clustered configuration.

    TODO: extend this object with different listeners if the driver is
    ever going to abstract the HTTP protocol as well; for now only the
    binary listener is of interest.
    """

    def __init__(self, node_dict=None):
        """
        :param node_dict: dict with starting configs (usually from a
            db_open / db_reload record response); when None, all fields
            stay unset.
        """
        #: node name
        self.name = None
        #: node id
        self.id = None
        #: timestamp the node was started
        self.started_on = None
        #: binary listener host
        self.host = None
        #: binary listener port
        self.port = None
        if node_dict is not None:
            self._parse_dict(node_dict)

    def _parse_dict(self, node_dict):
        # Copy the identity fields straight over.
        self.id = node_dict['id']
        self.name = node_dict['name']
        self.started_on = node_dict['startedOn']
        # Pick the first binary-protocol listener, if any, and split its
        # "host:port" address.
        binary_listener = next(
            (entry for entry in node_dict['listeners']
             if entry['protocol'] == 'ONetworkProtocolBinary'),
            None,
        )
        if binary_listener:
            address = binary_listener['listen'].split(':')
            self.host = address[0]
            self.port = address[1]

    def __str__(self):
        return self.name
| |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
start_nodes,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
'''
Test plan:
- Start digibyted's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on digibyted side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create digibyteds that connect to them
- Manipulate the digibyteds using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
'''
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
class ProxyTest(BitcoinTestFramework):
    def __init__(self):
        """Configure the test and start the local SOCKS5 proxy servers.

        NOTE(review): starting servers in __init__ (rather than setup_*)
        means they run even if node setup later fails — confirm this
        matches the framework version in use.
        """
        super().__init__()
        self.num_nodes = 4
        self.setup_clean_chain = False
        self.have_ipv6 = test_ipv6_local()
        # Create two proxies on different ports
        # ... one unauthenticated
        self.conf1 = Socks5Configuration()
        # Ports are derived from the pid so parallel runs don't collide.
        self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
        self.conf1.unauth = True
        self.conf1.auth = False
        # ... one supporting authenticated and unauthenticated (Tor)
        self.conf2 = Socks5Configuration()
        self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
        self.conf2.unauth = True
        self.conf2.auth = True
        if self.have_ipv6:
            # ... one on IPv6 with similar configuration
            self.conf3 = Socks5Configuration()
            self.conf3.af = socket.AF_INET6
            self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
            self.conf3.unauth = True
            self.conf3.auth = True
        else:
            print("Warning: testing without local IPv6 support")
        self.serv1 = Socks5Server(self.conf1)
        self.serv1.start()
        self.serv2 = Socks5Server(self.conf2)
        self.serv2.start()
        if self.have_ipv6:
            self.serv3 = Socks5Server(self.conf3)
            self.serv3.start()
    def setup_nodes(self):
        """Start the four digibyted nodes, one per proxy configuration:
        0: -proxy via serv1 (randomized), 1: -proxy serv1 + -onion serv2,
        2: -proxy via serv2 (randomized), 3: IPv6 proxy serv3 (only when
        IPv6 is available, otherwise no proxy args)."""
        # Note: proxies are not used to connect to local nodes
        # this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
        args = [
            ['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
            ['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
            ['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
            []
        ]
        if self.have_ipv6:
            args[3] = ['-listen', '-debug=net', '-debug=proxy', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
        return start_nodes(self.num_nodes, self.options.tmpdir, extra_args=args)
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: digibyted's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if self.have_ipv6:
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: digibyted's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("digibyteostk4e4re.onion:8333", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"digibyteostk4e4re.onion")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), len(rv))
if self.have_ipv6:
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
# test RPC getnetworkinfo
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
if self.have_ipv6:
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
# Script entry point: run the proxy functional test.
if __name__ == '__main__':
    ProxyTest().main()
| |
# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
This customization adds the following scalar parameters to the
authorize operations:
* --protocol: tcp | udp | icmp or any protocol number
* --port: A single integer or a range (min-max). You can specify ``all``
to mean all ports (for example, port range 0-65535)
* --source-group: Either the source security group ID or name.
* --cidr - The CIDR range. Cannot be used when specifying a source or
destination security group.
"""
from awscli.arguments import CustomArgument
def _add_params(argument_table, **kwargs):
    """Install the scalar convenience options (--protocol, --port, --cidr,
    --source-group, --group-owner) and hide the auto-generated arguments
    each of them replaces in the help output.
    """
    argument_table['protocol'] = ProtocolArgument(
        'protocol', help_text=PROTOCOL_DOCS)
    argument_table['ip-protocol']._UNDOCUMENTED = True
    argument_table['port'] = PortArgument('port', help_text=PORT_DOCS)
    # --port covers both from-port and to-port, so hide both of them.
    argument_table['from-port']._UNDOCUMENTED = True
    argument_table['to-port']._UNDOCUMENTED = True
    argument_table['cidr'] = CidrArgument('cidr', help_text=CIDR_DOCS)
    argument_table['cidr-ip']._UNDOCUMENTED = True
    argument_table['source-group'] = SourceGroupArgument(
        'source-group', help_text=SOURCEGROUP_DOCS)
    argument_table['source-security-group-name']._UNDOCUMENTED = True
    argument_table['group-owner'] = GroupOwnerArgument(
        'group-owner', help_text=GROUPOWNER_DOCS)
    argument_table['source-security-group-owner-id']._UNDOCUMENTED = True
def _check_args(parsed_args, **kwargs):
# This function checks the parsed args. If the user specified
# the --ip-permissions option with any of the scalar options we
# raise an error.
arg_dict = vars(parsed_args)
if arg_dict['ip_permissions']:
for key in ('protocol', 'port', 'cidr',
'source_group', 'group_owner'):
if arg_dict[key]:
msg = ('The --%s option is not compatible '
'with the --ip-permissions option ') % key
raise ValueError(msg)
def _add_docs(help_command, **kwargs):
doc = help_command.doc
doc.style.new_paragraph()
doc.style.start_note()
msg = ('To specify multiple rules in a single command '
'use the <code>--ip-permissions</code> option')
doc.include_doc_string(msg)
doc.style.end_note()
# (event_name, handler) pairs wired up by register_secgroup(): every
# authorize/revoke ingress/egress command gets the extra scalar params,
# the args-compatibility check, and the --ip-permissions doc note.
EVENTS = [
    ('building-argument-table.ec2.authorize-security-group-ingress',
     _add_params),
    ('building-argument-table.ec2.authorize-security-group-egress',
     _add_params),
    ('building-argument-table.ec2.revoke-security-group-ingress', _add_params),
    ('building-argument-table.ec2.revoke-security-group-egress', _add_params),
    ('operation-args-parsed.ec2.authorize-security-group-ingress',
     _check_args),
    ('operation-args-parsed.ec2.authorize-security-group-egress', _check_args),
    ('operation-args-parsed.ec2.revoke-security-group-ingress', _check_args),
    ('operation-args-parsed.ec2.revoke-security-group-egress', _check_args),
    ('doc-description.ec2.authorize-security-group-ingress', _add_docs),
    ('doc-description.ec2.authorize-security-group-egress', _add_docs),
    ('doc-description.ec2.revoke-security-group-ingress', _add_docs),
    # Bug fix: this entry read 'revoke-security-groupdoc-ingress', a
    # nonexistent event name, so revoke-security-group-egress never got
    # the doc note.
    ('doc-description.ec2.revoke-security-group-egress', _add_docs),
]
# Help-text snippets (reST/HTML) for the scalar convenience arguments.
PROTOCOL_DOCS = ('<p>The IP protocol of this permission.</p>'
                 '<p>Valid protocol values: <code>tcp</code>, '
                 '<code>udp</code>, <code>icmp</code></p>')
PORT_DOCS = ('<p>For TCP or UDP: The range of ports to allow.'
             ' A single integer or a range (<code>min-max</code>).</p>'
             '<p>For ICMP: A single integer or a range (<code>type-code</code>)'
             ' representing the ICMP type'
             ' number and the ICMP code number respectively.'
             ' A value of -1 indicates all ICMP codes for'
             ' all ICMP types. A value of -1 just for <code>type</code>'
             ' indicates all ICMP codes for the specified ICMP type.</p>')
CIDR_DOCS = '<p>The CIDR IP range.</p>'
# Bug fix: the closing </p> tag was missing, producing unbalanced markup
# in the rendered help output.
SOURCEGROUP_DOCS = ('<p>The name or ID of the source security group. '
                    'Cannot be used when specifying a CIDR IP address.</p>')
GROUPOWNER_DOCS = ('<p>The AWS account ID that owns the source security '
                   'group. Cannot be used when specifying a CIDR IP '
                   'address.</p>')
def register_secgroup(event_handler):
    """Hook every (event, handler) pair from EVENTS into the CLI session."""
    for event_name, handler_fn in EVENTS:
        event_handler.register(event_name, handler_fn)
def _build_ip_permissions(params, key, value):
if 'IpPermissions' not in params:
params['IpPermissions'] = [{}]
if key == 'CidrIp':
if 'IpRanges' not in params['ip_permissions'][0]:
params['IpPermissions'][0]['IpRanges'] = []
params['IpPermissions'][0]['IpRanges'].append(value)
elif key in ('GroupId', 'GroupName', 'UserId'):
if 'UserIdGroupPairs' not in params['IpPermissions'][0]:
params['IpPermissions'][0]['UserIdGroupPairs'] = [{}]
params['IpPermissions'][0]['UserIdGroupPairs'][0][key] = value
else:
params['IpPermissions'][0][key] = value
class ProtocolArgument(CustomArgument):
    """--protocol: a protocol name (tcp|udp|icmp|all) or protocol number.

    'all' is translated to protocol number -1 before being stored as
    IpProtocol in the request's IpPermissions.
    """

    def add_to_params(self, parameters, value):
        if value:
            try:
                int_value = int(value)
            except ValueError:
                # Not numeric: must be one of the known protocol names.
                if value not in ('tcp', 'udp', 'icmp', 'all'):
                    msg = ('protocol parameter should be one of: '
                           'tcp|udp|icmp|all or any valid protocol number.')
                    raise ValueError(msg)
                if value == 'all':
                    value = '-1'
            else:
                # Bug fix: this range check used to raise inside the same
                # try block, so its ValueError was swallowed by the except
                # clause above and out-of-range numbers reported the
                # unrelated "should be one of" message instead.
                if (int_value < 0 or int_value > 255) and int_value != -1:
                    msg = ('protocol numbers must be in the range 0-255 '
                           'or -1 to specify all protocols')
                    raise ValueError(msg)
            _build_ip_permissions(parameters, 'IpProtocol', value)
class PortArgument(CustomArgument):
    """--port: one port, a min-max range, or -1/'all' for every port.

    Populates both FromPort and ToPort in the request's IpPermissions.
    """

    def add_to_params(self, parameters, value):
        if not value:
            return
        try:
            if value in ('-1', 'all'):
                low, high = '-1', '-1'
            elif '-' in value:
                # argparse already rejects option-like values such as
                # "-1-8", so a single split on the first '-' is enough.
                low, high = value.split('-', 1)
            else:
                low = high = value
            _build_ip_permissions(parameters, 'FromPort', int(low))
            _build_ip_permissions(parameters, 'ToPort', int(high))
        except ValueError:
            msg = ('port parameter should be of the '
                   'form <from[-to]> (e.g. 22 or 22-25)')
            raise ValueError(msg)
class CidrArgument(CustomArgument):
    """--cidr: the CIDR IP range, stored as a one-element IpRanges list."""

    def add_to_params(self, parameters, value):
        if not value:
            return
        _build_ip_permissions(parameters, 'IpRanges', [{'CidrIp': value}])
class SourceGroupArgument(CustomArgument):
    """--source-group: a security group given by ID (sg-...) or by name."""

    def add_to_params(self, parameters, value):
        if not value:
            return
        # Security-group IDs always carry the 'sg-' prefix; anything else
        # is treated as a group name.
        key = 'GroupId' if value.startswith('sg-') else 'GroupName'
        _build_ip_permissions(parameters, key, value)
class GroupOwnerArgument(CustomArgument):
    """--group-owner: AWS account ID owning the source security group."""

    def add_to_params(self, parameters, value):
        if not value:
            return
        _build_ip_permissions(parameters, 'UserId', value)
| |
"""
Copyright (c) 2016 Gianluca Gerard
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Portions of the code are
Copyright (c) 2010--2015, Deep Learning Tutorials Development Team
All rights reserved.
"""
from __future__ import print_function, division
import os
import shutil
import datetime
from hashlib import md5
import numpy
import theano
import sys
import mdbnlogging
from utils import load_n_preprocess_data
from utils import find_unique_classes
from utils import write_config
from dbn import DBN
from six.moves import cPickle
from botocore.exceptions import ClientError
from decimal import Decimal
def train_dbn(train_set, validation_set,
              name="",
              gauss=True,
              batch_size=20,
              k=1, p=0.5,
              layers_sizes=[40],
              pretraining_epochs=[800],
              pretrain_lr=[0.005],
              lambdas=[0.01, 0.1],
              rng=None,
              run=0,
              persistent=False,
              verbose=False,
              graph_output=False
              ):
    """Pretrain one DBN on a single data modality and return it together
    with its outputs on the training and (optional) validation sets.

    NOTE(review): the list-typed defaults are shared across calls (mutable
    default arguments); kept as-is because callers always pass their own —
    confirm before relying on the defaults.
    """
    n_visible = train_set.get_value().shape[1]
    mdbnlogging.info('RUN:%i:DBN:%s:visible nodes:%i' % (run, name, n_visible))
    mdbnlogging.info('RUN:%i:DBN:%s:output nodes:%i' % (run, name, layers_sizes[-1]))
    # The last entry of layers_sizes is the output layer; the rest are hidden.
    net = DBN(name=name, numpy_rng=rng, n_ins=n_visible,
              p=p,
              gauss=gauss,
              hidden_layers_sizes=layers_sizes[:-1],
              n_outs=layers_sizes[-1])
    net.training(train_set_x=train_set,
                 batch_size=batch_size, k=k,
                 pretraining_epochs=pretraining_epochs,
                 pretrain_lr=pretrain_lr,
                 lambdas=lambdas,
                 persistent=persistent,
                 run=run,
                 verbose=verbose,
                 validation_set_x=validation_set,
                 graph_output=graph_output)
    out_train = net.get_output(train_set)
    out_val = net.get_output(validation_set) if validation_set is not None else None
    return net, out_train, out_val
class MDBN(object):
    """Batch driver for training a Multimodal Deep Belief Network.

    One unimodal DBN is trained per pathway; their outputs are stacked and
    fed to a top-level DBN.  Results can be mirrored to an S3 bucket and
    run progress tracked in a DynamoDB table.
    """
    def __init__(self,
                 batch_dir_prefix="batch",
                 holdout_fraction=0,
                 repeats=1,
                 log_enabled=False,
                 verbose=False,
                 s3_bucket=None,
                 dyndb_table=None,
                 batch_start_date_str=None,
                 output_dir="MDBN_run"):
        """
        :param batch_dir_prefix: prefix for the per-batch output directory
        :param holdout_fraction: fraction of samples held out for validation
        :param repeats: number of times each sample is repeated to enlarge
                        the dataset
        :param log_enabled: write per-run log files when True
        :param verbose: enable debug-level logging
        :param s3_bucket: optional boto3 Bucket used to mirror results
        :param dyndb_table: optional boto3 DynamoDB Table for run tracking
        :param batch_start_date_str: batch timestamp; generated (UTC,
                        %Y%m%dT%H%M%S) when None
        :param output_dir: root directory for all batch output
        """
        self.holdout_fraction = holdout_fraction
        # Bug fix: 'repeats' was hard-coded to 1 here, silently ignoring
        # the constructor argument.
        self.repeats = repeats
        self.log_enabled = log_enabled
        self.verbose = verbose
        self.batch_dir_prefix = batch_dir_prefix
        self.output_dir = output_dir
        self.s3_bucket = s3_bucket
        self.dyndb_table = dyndb_table
        if batch_start_date_str is None:
            batch_start_date = datetime.datetime.utcnow()
            batch_start_date_str = batch_start_date.strftime("%Y%m%dT%H%M%S")
        self.batch_start_date_str = batch_start_date_str
    def run(self, config, datafiles):
        """Execute config['runs'] training runs, resuming the run counter
        from DynamoDB when available, and return the per-run class counts.
        """
        uuid = config['uuid']
        if not os.path.isdir(self.output_dir):
            os.mkdir(self.output_dir)
        batch_output_dir = '%s/%s_%s' % \
            (self.output_dir, self.batch_dir_prefix, self.batch_start_date_str)
        if not os.path.isdir(batch_output_dir):
            os.mkdir(batch_output_dir)
        log_level = mdbnlogging.DEBUG if self.verbose else mdbnlogging.INFO
        last_run = 0
        n_classes = []
        if self.dyndb_table is not None:
            # Resume support: pick up the run counter and classes found so
            # far from the tracking table.
            try:
                response = self.dyndb_table.get_item(
                    Key={
                        'uuid': uuid,
                        'timestamp': self.batch_start_date_str
                    }
                )
            except ClientError as e:
                print(e.response['Error']['Message'])
            else:
                item = response['Item']
                # NOTE(review): DynamoDB numbers come back as Decimal; if
                # n_runs is a Decimal, range(last_run, ...) below fails on
                # Python 3 — confirm against real table data.
                last_run = item['n_runs']
                n_classes = item['n_classes']
        for run in range(last_run, config["runs"]):
            # Distinct deterministic seed per run.
            rng = numpy.random.RandomState(config["seed"]+run*1000)
            if self.log_enabled:
                mdbnlogging.basicConfig(filename=batch_output_dir + '/run.%s.log' % run,
                                        log_level=log_level)
            else:
                mdbnlogging.basicConfig(log_level=log_level)
            mdbnlogging.info("CONFIG_UUID:%s" % uuid)
            run_start_date = datetime.datetime.now()
            mdbnlogging.info('RUN:%i:start date:%s:start time:%s' % (run,
                             run_start_date.strftime("%Y.%m.%d"),
                             run_start_date.strftime("%H.%M.%S")))
            dbn_output = self.train(config, datafiles,
                                    run=run,
                                    output_folder=batch_output_dir,
                                    network_file='Exp_%s_run_%d.npz' %
                                                 (uuid, run),
                                    rng=rng)
            current_date_time = datetime.datetime.now()
            # Binarize the top-level output at 0.5 and count distinct classes.
            classes = find_unique_classes((dbn_output > 0.5) * numpy.ones_like(dbn_output))
            n_classes.append(Decimal(numpy.max(classes[0])+1))
            mdbnlogging.info('RUN:%i:classes identified:%d' % (run, n_classes[-1]))
            mdbnlogging.info('RUN:%i:stop date:%s:stop time:%s' % (run,
                             current_date_time.strftime("%Y.%m.%d"),
                             current_date_time.strftime("%H.%M.%S")))
            if self.s3_bucket is not None:
                # NOTE(review): upload_file(Filename, Key) is called with the
                # same string for both arguments, so the S3 key mirrors the
                # local path — confirm this is the intended bucket layout.
                try:
                    self.s3_bucket.upload_file(batch_output_dir + '/' +
                                               'Results_%s.npz' % uuid,
                                               batch_output_dir + '/' +
                                               'Results_%s.npz' % uuid)
                    self.s3_bucket.upload_file(batch_output_dir + '/run.%s.log' % run,
                                               batch_output_dir + '/run.%s.log' % run)
                except OSError as e:
                    print("OS error ({0}): {1}".format(e.errno, e.strerror))
                except:
                    print("Could not transfer %s to s3" % batch_output_dir, file=sys.stderr)
            if self.dyndb_table is not None:
                # Update the run just completed in DynamoDB
                self.dyndb_table.update_item(
                    Key={
                        'uuid': uuid,
                        'timestamp': self.batch_start_date_str
                    },
                    UpdateExpression="set n_runs = n_runs + :val, n_classes = :c, job_status = :s",
                    ExpressionAttributeValues={
                        ':val': Decimal(1),
                        ':c': n_classes,
                        ':s': 'IN_PROGRESS'
                    }
                )
        # Persist the configuration next to the results.
        root_dir = os.getcwd()
        os.chdir(batch_output_dir)
        write_config(config, config['name'])
        os.chdir(root_dir)
        if self.s3_bucket is not None:
            try:
                self.s3_bucket.upload_file(batch_output_dir + '/' + config['name'],
                                           batch_output_dir + '/' + config['name'])
                # Everything was mirrored to S3; reclaim the local space.
                shutil.rmtree(batch_output_dir)
            except OSError as e:
                print("OS error ({0}): {1}".format(e.errno, e.strerror))
            except:
                print("Could not transfer %s to s3" % batch_output_dir, file=sys.stderr)
        if self.dyndb_table is not None:
            # Mark the whole batch as finished.
            self.dyndb_table.update_item(
                Key={
                    'uuid': uuid,
                    'timestamp': self.batch_start_date_str
                },
                UpdateExpression="set job_status = :s",
                ExpressionAttributeValues={
                    ':s': 'DONE'
                }
            )
        return n_classes
    def train(self,
              config,
              datafiles,
              run=0,
              graph_output=False,
              datadir='data',
              output_folder='MDBN_run',
              network_file='network.npz',
              tmp_folder='tmp',
              rng=None):
        """Train one unimodal DBN per pathway plus the joint top-level DBN.

        :param config: experiment configuration (pathways, dbns, top, seed, p)
        :param datafiles: a dictionary with the path to the unimodal datasets
        :param run: index of the current run, used in log lines and cache names
        :param graph_output: if True it will output graphical representation of
                             the network parameters
        :param datadir: directory where the datasets are located
        :param output_folder: directory where the results are stored
        :param network_file: name of the file where the parameters are saved at
                             the end of the training
        :param tmp_folder: cache directory for pickled, already-trained DBNs
        :param rng: random number generator, by default is None and it is
                    initialized by the function
        :return: the top-level DBN output for the complete dataset
        """
        if rng is None:
            rng = numpy.random.RandomState(config["seed"])
        if not os.path.isdir(tmp_folder):
            os.mkdir(tmp_folder)
        #################################
        #     Training the RBM          #
        #################################
        dbn_dict = dict()
        output_t_list = []
        output_v_list = []
        for pathway in config["pathways"]:
            mdbnlogging.info('RUN:%i:DBN:%s:start training' % (run, pathway))
            train_set, validation_set = load_n_preprocess_data(datafiles[pathway],
                                                               holdout_fraction=self.holdout_fraction,
                                                               repeats=self.repeats,
                                                               datadir=datadir,
                                                               rng=rng)
            netConfig = config['dbns'][pathway]
            netConfig['inputNodes'] = train_set.get_value().shape[1]
            # Cache key covers the pathway config plus the shared seed and p.
            # NOTE(review): dict.values() + list only works on Python 2.
            config_hash = md5(str(netConfig.values()+[config['seed'],config['p']])).hexdigest()
            dump_file = '%s/dbn_%s_%s_%d.save' % (tmp_folder, pathway, config_hash, run)
            if os.path.isfile(dump_file):
                # Identical network already trained: reuse the pickle.
                with open(dump_file, 'rb') as f:
                    dbn_dict[pathway] = cPickle.load(f)
            else:
                dbn_dict[pathway], _, _ = train_dbn(
                    train_set, validation_set,
                    name=pathway,
                    gauss=True,
                    batch_size=netConfig["batchSize"],
                    k=netConfig["k"],
                    p=config["p"],
                    layers_sizes=netConfig["layersNodes"],
                    pretraining_epochs=netConfig["epochs"],
                    pretrain_lr=netConfig["lr"],
                    lambdas=netConfig["lambdas"],
                    rng=rng,
                    persistent=netConfig["persistent"],
                    run=run,
                    verbose=self.verbose,
                    graph_output=graph_output)
                with open(dump_file, 'wb') as f:
                    cPickle.dump(dbn_dict[pathway], f, protocol=cPickle.HIGHEST_PROTOCOL)
            for layer in range(dbn_dict[pathway].n_layers):
                rbm = dbn_dict[pathway].rbm_layers[layer]
                mdbnlogging.info('RUN:%i:DBN:%s:layer:%i:epoch:%i:minimum cost:%f' %
                                 (run, rbm.name, layer, rbm.training_end_state[0], rbm.training_end_state[1]))
            output_t, output_v = dbn_dict[pathway].\
                MLP_output_from_datafile(datafiles[pathway],
                                         holdout=self.holdout_fraction,
                                         repeats=self.repeats)
            output_t_list.append(output_t)
            output_v_list.append(output_v)
        # Join the unimodal outputs into the input of the top-level DBN.
        joint_train_set = theano.shared(numpy.hstack(output_t_list), borrow=True)
        mdbnlogging.info('RUN:%i:DBN:top:start training' % run)
        if self.holdout_fraction > 0:
            joint_val_set = theano.shared(numpy.hstack(output_v_list), borrow=True)
        else:
            joint_val_set = None
        netConfig = config['top']
        netConfig['inputNodes'] = joint_train_set.get_value().shape[1]
        config_hash = md5(str(config.values())).hexdigest()
        # Bug fix: the cache path hard-coded 'tmp/' here, ignoring the
        # tmp_folder parameter used for the unimodal caches above.
        dump_file = '%s/dbn_top_%s_%d.save' % (tmp_folder, config_hash, run)
        if os.path.isfile(dump_file):
            with open(dump_file, 'rb') as f:
                dbn_dict['top'] = cPickle.load(f)
        else:
            dbn_dict['top'], _, _ = train_dbn(joint_train_set, joint_val_set,
                                              name='top',
                                              gauss=False,
                                              batch_size=netConfig["batchSize"],
                                              k=netConfig["k"],
                                              p=config["p"],
                                              layers_sizes=netConfig["layersNodes"],
                                              pretraining_epochs=netConfig["epochs"],
                                              pretrain_lr=netConfig["lr"],
                                              rng=rng,
                                              persistent=netConfig["persistent"],
                                              run=run,
                                              verbose=self.verbose,
                                              graph_output=graph_output)
            with open(dump_file, 'wb') as f:
                cPickle.dump(dbn_dict['top'], f, protocol=cPickle.HIGHEST_PROTOCOL)
        # Computing the top-level output over the complete dataset.
        unimodal_dbn_output_list = []
        for pathway in config["pathways"]:
            dbn_output, _ = dbn_dict[pathway].MLP_output_from_datafile(datafiles[pathway])
            unimodal_dbn_output_list.append(dbn_output)
        joint_layer = theano.shared(numpy.hstack(unimodal_dbn_output_list), borrow=True)
        final_output = dbn_dict['top'].get_output(joint_layer)
        self.save_network(config, final_output, dbn_dict, network_file, output_folder)
        return final_output
    def save_network(self, config, top_output, dbn_dict, network_file, output_folder):
        """Dump every DBN's parameters plus the top-level output to an .npz
        file in output_folder, optionally mirroring it to S3.
        """
        if not os.path.isdir(output_folder):
            os.makedirs(output_folder)
        root_dir = os.getcwd()
        os.chdir(output_folder)
        dbn_params = {}
        for n in config['pathways']+['top']:
            dbn = dbn_dict[n]
            params = {}
            # Group parameter values by name (W, hbias, vbias), one entry
            # per layer in order.
            for p in dbn.params:
                if p.name in params:
                    params[p.name].append(p.get_value())
                else:
                    params[p.name] = [p.get_value()]
            dbn_params[n] = params
        numpy.savez(network_file,
                    holdout=self.holdout_fraction,
                    repeats=self.repeats,
                    config=config,
                    classes=top_output,
                    dbn_params=dbn_params
                    )
        os.chdir(root_dir)
        if self.s3_bucket is not None:
            try:
                self.s3_bucket.upload_file(output_folder + '/' + network_file,
                                           output_folder + '/' + network_file)
            except:
                print("Could not transfer %s on s3" % network_file, file=sys.stderr)
    def load_network(self, input_file, input_folder):
        """Rebuild the DBNs saved by save_network and return (config, dbn_dict)."""
        root_dir = os.getcwd()
        # TODO: check if the input_folder exists
        os.chdir(input_folder)
        if self.s3_bucket is not None:
            self.s3_bucket.download_file(input_file,
                                         input_folder + '/' + input_file)
        npz = numpy.load(input_file)
        config = npz['config'].tolist()
        dbn_params = npz['dbn_params'].tolist()
        dbn_dict = {}
        for key in config['pathways']+["top"]:
            params = dbn_params[key]
            netConfig = config['dbns'][key] if key != "top" else config['top']
            layer_sizes = netConfig['layersNodes']
            # Only the unimodal (non-top) networks use Gaussian visible units.
            dbn_dict[key] = DBN(n_ins=netConfig['inputNodes'],
                                hidden_layers_sizes=layer_sizes[:-1],
                                gauss=key != 'top',
                                n_outs=layer_sizes[-1],
                                W_list=params['W'], b_list=params['hbias'], c_list=params['vbias'])
        os.chdir(root_dir)
        return config, dbn_dict
| |
import django
from django.contrib import admin, auth
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied,\
ValidationError
from django.http.request import QueryDict
from django.test import TestCase
from django.test.client import RequestFactory
import six
class ModelAdminCheckException(Exception):
    """Wraps an exception raised while smoke-testing a ModelAdmin.

    The triggering exception is kept on ``original_exception`` so callers
    can inspect the root cause.
    """

    def __init__(self, message, original_exception):
        super(ModelAdminCheckException, self).__init__(message)
        self.original_exception = original_exception
def for_all_model_admins(fn):
    """Decorator: run *fn* once per registered (model, modeladmin) pair,
    honouring the test class's exclusion lists and re-raising any failure
    wrapped in ModelAdminCheckException with the offending admin named.
    """
    def test_deco(self):
        skip_admins = self.exclude_modeladmins
        skip_apps = self.exclude_apps
        for model, model_admin in self.modeladmins:
            if (model_admin.__class__ in skip_admins
                    or model._meta.app_label in skip_apps):
                continue
            try:
                fn(self, model, model_admin)
            except Exception as e:
                six.raise_from(ModelAdminCheckException(
                    "Above exception occured while running test '%s' "
                    "on modeladmin %s (%s)" %
                    (fn.__name__, model_admin, model.__name__),
                    e), e)
    return test_deco
class AdminSiteSmokeTestMixin(object):
    """Smoke-tests every ModelAdmin registered with the default admin site.

    Each test_* method below is decorated with for_all_model_admins, so it
    runs once per (model, modeladmin) pair, skipping anything listed in
    exclude_apps / exclude_modeladmins.
    """
    # None means "use every ModelAdmin registered with admin.site" (see setUp).
    modeladmins = None
    # App labels and ModelAdmin classes to skip entirely.
    exclude_apps = []
    exclude_modeladmins = []
    fixtures = ['django_admin_smoke_tests']
    # Admin attributes holding a single field name.
    single_attributes = ['date_hierarchy']
    # Admin attributes holding an iterable of field names.
    iter_attributes = [
        'filter_horizontal',
        'filter_vertical',
        'list_display',
        'list_display_links',
        'list_editable',
        'list_filter',
        'readonly_fields',
        'search_fields',
    ]
    # Admin attributes that are either an iterable or a falsy value.
    iter_or_falsy_attributes = [
        'exclude',
        'fields',
        'ordering',
    ]
    # Attributes whose values may carry a leading '-' (descending ordering).
    strip_minus_attrs = ('ordering',)
    def setUp(self):
        """Create a superuser, a RequestFactory, and collect the modeladmins
        to test (defaulting to the full admin.site registry)."""
        super(AdminSiteSmokeTestMixin, self).setUp()
        self.superuser = auth.get_user_model().objects.create_superuser(
            'testuser', 'testuser@example.com', 'foo')
        self.factory = RequestFactory()
        if not self.modeladmins:
            self.modeladmins = admin.site._registry.items()
        try:
            admin.autodiscover()
        except:
            pass
    def get_request(self, params=None):
        """Return a GET request authenticated as the superuser."""
        request = self.factory.get('/', params)
        request.user = self.superuser
        return request
    def post_request(self, post_data={}, params=None):
        """Return a POST request authenticated as the superuser with CSRF
        checks disabled."""
        request = self.factory.post('/', params, post_data=post_data)
        request.user = self.superuser
        request._dont_enforce_csrf_checks = True
        return request
    def strip_minus(self, attr, val):
        """Drop a leading '-' from *val* for attrs that allow descending
        ordering (see strip_minus_attrs)."""
        if attr in self.strip_minus_attrs and val[0] == '-':
            val = val[1:]
        return val
    def get_fieldsets(self, model, model_admin):
        """Return the admin's fieldsets for a fresh, unsaved model instance."""
        request = self.get_request()
        return model_admin.get_fieldsets(request, obj=model())
    def get_attr_set(self, model, model_admin):
        """Collect every field/attribute name the modeladmin references
        across its iterable, optional, fieldset, and single attributes."""
        attr_set = []
        for attr in self.iter_attributes:
            attr_set += [
                self.strip_minus(attr, a)
                for a in getattr(model_admin, attr)
            ]
        for attr in self.iter_or_falsy_attributes:
            attrs = getattr(model_admin, attr, None)
            if isinstance(attrs, list) or isinstance(attrs, tuple):
                attr_set += [self.strip_minus(attr, a) for a in attrs]
        for fieldset in self.get_fieldsets(model, model_admin):
            for attr in fieldset[1]['fields']:
                if isinstance(attr, list) or isinstance(attr, tuple):
                    # NOTE(review): strip_minus is passed the fieldset tuple
                    # here rather than an attribute name; it only matters for
                    # membership in strip_minus_attrs, so this branch never
                    # strips a minus — confirm whether 'attr' was intended.
                    attr_set += [self.strip_minus(fieldset, a)
                                 for a in attr]
                else:
                    attr_set.append(attr)
        attr_set = set(attr_set)
        for attr in self.single_attributes:
            val = getattr(model_admin, attr, None)
            if val:
                attr_set.add(self.strip_minus(attr, val))
        return attr_set
    @for_all_model_admins
    def test_specified_fields(self, model, model_admin):
        """Every name the admin references must exist on the model, the
        form, the model class, or the admin itself."""
        attr_set = self.get_attr_set(model, model_admin)
        # FIXME: not all attributes can be used everywhere (e.g. you can't
        # use list_filter with a form field). This will have to be fixed
        # later.
        try:
            # NOTE(review): get_fields() returns Field objects, not names,
            # so the string membership test below can never match this set —
            # looks like it should be {f.name for f in ...}; the failure is
            # masked by the other has_* checks. Confirm before changing.
            model_field_names = frozenset(model._meta.get_fields())
        except AttributeError:  # Django<1.10
            model_field_names = frozenset(
                model._meta.get_all_field_names()
            )
        form_field_names = frozenset(getattr(model_admin.form,
                                             'base_fields', []))
        model_instance = model()
        for attr in attr_set:
            # for now we'll just check attributes, not strings
            if not isinstance(attr, six.string_types):
                continue
            # don't split attributes that start with underscores (such as
            # __str__)
            if attr[0] != '_':
                attr = attr.split('__')[0]
            has_model_field = attr in model_field_names
            has_form_field = attr in form_field_names
            has_model_class_attr = hasattr(model_instance.__class__, attr)
            has_admin_attr = hasattr(model_admin, attr)
            try:
                has_model_attr = hasattr(model_instance, attr)
            except (ValueError, ObjectDoesNotExist):
                has_model_attr = attr in model_instance.__dict__
            has_field_or_attr = has_model_field or has_form_field or\
                has_model_attr or has_admin_attr or has_model_class_attr
            self.assertTrue(has_field_or_attr, '%s not found on %s (%s)' %
                            (attr, model, model_admin,))
    @for_all_model_admins
    def test_queryset(self, model, model_admin):
        """The admin queryset must be evaluable without errors."""
        request = self.get_request()
        # TODO: use model_mommy to generate a few instances to query against
        # make sure no errors happen here
        if hasattr(model_admin, 'get_queryset'):
            list(model_admin.get_queryset(request))
    @for_all_model_admins
    def test_get_absolute_url(self, model, model_admin):
        """get_absolute_url (when defined) must run without errors."""
        if hasattr(model, 'get_absolute_url'):
            # Use fixture data if it exists
            instance = model.objects.first()
            # Otherwise create a minimal instance
            if not instance:
                instance = model(pk=1)
            # make sure no errors happen here
            instance.get_absolute_url()
    @for_all_model_admins
    def test_changelist_view(self, model, model_admin):
        """The changelist view must render with HTTP 200."""
        request = self.get_request()
        # make sure no errors happen here
        try:
            response = model_admin.changelist_view(request)
            response.render()
            self.assertEqual(response.status_code, 200)
        except PermissionDenied:
            # this error is commonly raised by ModelAdmins that don't allow
            # changelist view
            pass
    @for_all_model_admins
    def test_changelist_view_search(self, model, model_admin):
        """The changelist view must also render when a search is active."""
        request = self.get_request(params=QueryDict('q=test'))
        # make sure no errors happen here
        try:
            response = model_admin.changelist_view(request)
            response.render()
            self.assertEqual(response.status_code, 200)
        except PermissionDenied:
            # this error is commonly raised by ModelAdmins that don't allow
            # changelist view.
            pass
    @for_all_model_admins
    def test_add_view(self, model, model_admin):
        """The add view must render with HTTP 200."""
        request = self.get_request()
        # make sure no errors happen here
        try:
            response = model_admin.add_view(request)
            if isinstance(response, django.template.response.TemplateResponse):
                response.render()
            self.assertEqual(response.status_code, 200)
        except PermissionDenied:
            # this error is commonly raised by ModelAdmins that don't allow
            # adding.
            pass
    @for_all_model_admins
    def test_change_view(self, model, model_admin):
        """The change view must render for an existing (fixture) instance."""
        item = model.objects.last()
        # Skip when there is no data or the model is a proxy.
        if not item or model._meta.proxy:
            return
        pk = item.pk
        request = self.get_request()
        # make sure no errors happen here
        response = model_admin.change_view(request, object_id=str(pk))
        if isinstance(response, django.template.response.TemplateResponse):
            response.render()
        self.assertEqual(response.status_code, 200)
    @for_all_model_admins
    def test_change_post(self, model, model_admin):
        """POSTing to the change view must not error (validation failures
        are acceptable)."""
        item = model.objects.last()
        # Skip when there is no data or the model is a proxy.
        if not item or model._meta.proxy:
            return
        pk = item.pk
        # TODO: If we generate default post_data for post request,
        # the test would be stronger
        request = self.post_request()
        try:
            response = model_admin.change_view(request, object_id=str(pk))
            if isinstance(response, django.template.response.TemplateResponse):
                response.render()
            self.assertEqual(response.status_code, 200)
        except ValidationError:
            # This the form was sent, but did not pass it's validation
            pass
class AdminSiteSmokeTest(AdminSiteSmokeTestMixin, TestCase):
    """Concrete TestCase running the smoke tests against every ModelAdmin."""
    pass
| |
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
import models
def classify_issue(repo, number):
    '''
    Classify an issue in a repo based on events stored in Datastore.

    Args:
        repo: string
        number: int
    Returns:
        is_pr: bool
        is_open: bool
        involved: list of strings representing usernames involved
        payload: a dict, see full description for classify below.
        last_event_timestamp: the timestamp of the most recent event.
    '''
    ancestor = models.GithubResource.make_key(repo, number)
    events = sorted(models.GithubWebhookRaw.query(ancestor=ancestor),
                    key=lambda ev: ev.timestamp)
    logging.debug('classifying %s %s (%d events)', repo, number, len(events))
    event_pairs = [(ev.event, json.loads(ev.body)) for ev in events]
    last_event_timestamp = events[-1].timestamp
    merged = get_merged(event_pairs)
    statuses = None
    if 'head' in merged:
        # PRs carry a head SHA; pull in the latest CI status per context.
        statuses = {}
        head_sha = merged['head']['sha']
        for status in models.GHStatus.query_for_sha(repo, head_sha):
            last_event_timestamp = max(last_event_timestamp, status.updated_at)
            statuses[status.context] = [
                status.state, status.target_url, status.description]
    return list(classify(event_pairs, statuses)) + [last_event_timestamp]
def get_merged(events):
    '''
    Fold a stream of webhook events into the most up-to-date view of
    the issue.

    Note that different events have different levels of detail -- comments
    don't include head SHA information, pull request events don't have
    label information, etc.

    Args:
        events: a list of (event_type str, event_body dict) pairs.
    Returns:
        body: a dict representing the issue's latest state.
    '''
    latest = {}
    for _kind, payload in events:
        # Later events overwrite earlier fields; both sections may appear.
        for section in ('issue', 'pull_request'):
            if section in payload:
                latest.update(payload[section])
    return latest
def get_labels(events):
    '''
    Determine the labels applied to an issue.

    Args:
        events: a list of (event_type str, event_body dict) pairs.
    Returns:
        labels: the currently applied labels as {label_name: label_color}
    '''
    labels = []
    for event, body in events:
        if 'issue' in body:
            # issues come with labels, so we can update here
            labels = body['issue']['labels']
        # pull_requests don't include their full labels :(
        action = body.get('action')
        if event == 'pull_request':
            # Pull request label events don't come with a full label set.
            # Track them explicitly here.
            try:
                if action in ('labeled', 'unlabeled') and 'label' not in body:
                    logging.warning('label event with no labels (multiple changes?)')
                elif action == 'labeled':
                    label = body['label']
                    if label not in labels:
                        labels.append(label)
                elif action == 'unlabeled':
                    label = body['label']
                    if label in labels:
                        labels.remove(label)
            except Exception:
                # Was a bare `except:`, which also caught KeyboardInterrupt /
                # SystemExit. Log the offending payload for debugging, then
                # re-raise (same behavior as before for real errors).
                logging.exception('??? %r', body)
                raise
    return {label['name']: label['color'] for label in labels}
def get_skip_comments(events, skip_users=None):
    '''
    Collect ids of comments that should be ignored, either because they
    were deleted or because their author is in skip_users (e.g. bots).

    Args:
        events: a list of (event_type str, event_body dict) pairs.
        skip_users: optional list of usernames whose comments are skipped.
    Returns:
        comment_ids: a set of comment ids that were deleted or made by
            users that should be skipped.
    '''
    ignored_authors = skip_users if skip_users is not None else []
    comment_kinds = ('issue_comment', 'pull_request_review_comment')
    skipped = set()
    for kind, payload in events:
        if kind not in comment_kinds:
            continue
        comment_id = payload['comment']['id']
        was_deleted = payload.get('action') == 'deleted'
        if was_deleted or payload['sender']['login'] in ignored_authors:
            skipped.add(comment_id)
    return skipped
def classify(events, statuses=None):
    '''
    Given an event-stream for an issue and status-getter, process
    the events and determine what action should be taken, if any.

    Args:
        events: a list of (event_type str, event_body dict) pairs.
        statuses: optional {context: [state, target_url, description]}
            CI status map for the PR head SHA.
    Returns:
        is_pr: bool
        is_open: bool
        involved: list of strings representing usernames involved
        payload: a dictionary of additional information, including:
            {
                'author': str author_name,
                'title': str issue title,
                'labels': {label_name: label_color},
                'attn': {user_name: reason},
                'mergeable': bool,
                'comments': [{'user': str name, 'comment': comment, 'timestamp': str iso8601}]
            }
    '''
    merged = get_merged(events)
    labels = get_labels(events)

    is_pr = 'head' in merged or 'pull_request' in merged
    is_open = merged['state'] != 'closed'
    author = merged['user']['login']
    assignees = sorted(assignee['login'] for assignee in merged['assignees'])
    involved = [author] + assignees

    payload = {
        'author': author,
        'assignees': assignees,
        'title': merged['title'],
        'labels': labels,
    }

    if is_pr:
        if is_open:
            # NOTE(review): this compares 'mergeable' against the *string*
            # 'false'; the GitHub API usually returns a boolean here --
            # confirm whether this branch can ever fire.
            payload['needs_rebase'] = 'needs-rebase' in labels or merged.get('mergeable') == 'false'
        payload['additions'] = merged.get('additions', 0)
        payload['deletions'] = merged.get('deletions', 0)
        if 'head' in merged:
            payload['head'] = merged['head']['sha']

    if statuses:
        payload['status'] = statuses

    payload['attn'] = calculate_attention(distill_events(events), payload)

    return is_pr, is_open, involved, payload
def distill_events(events):
    '''
    Reduce an event stream to the (action, user) tuples that matter
    for determining author/assignee state. Comments from k8s-bot and
    deleted comments are dropped.
    '''
    skipped_ids = get_skip_comments(events, ['k8s-bot'])
    distilled = []
    for kind, payload in events:
        act = payload.get('action')
        who = payload.get('sender', {}).get('login')
        if kind in ('issue_comment', 'pull_request_review_comment'):
            is_skipped = payload['comment']['id'] in skipped_ids
            if not is_skipped and act == 'created':
                distilled.append(('comment', who))
        elif kind == 'pull_request':
            if act in ('opened', 'reopened', 'synchronize'):
                distilled.append(('push', who))
            if act == 'labeled' and 'label' in payload:
                distilled.append(('label ' + payload['label']['name'].lower(), who))
    return distilled
def get_author_state(author, distilled_events):
    '''
    Walk the distilled event stream and return the author's current
    state: either 'waiting' (nothing to do) or 'address comments'.
    '''
    state = 'waiting'
    for action, user in distilled_events:
        someone_else_commented = action == 'comment' and user != author
        author_responded = (action == 'push' or
                            (action == 'comment' and user == author))
        if state == 'waiting' and someone_else_commented:
            state = 'address comments'
        elif state == 'address comments' and author_responded:
            state = 'waiting'
    return state
def get_assignee_state(assignee, distilled_events):
    '''
    Walk the distilled event stream and return the assignee's current
    state: either 'needs review' or 'waiting'.
    '''
    state = 'needs review'
    for action, user in distilled_events:
        if state == 'needs review':
            reviewed = (user == assignee and
                        action in ('comment', 'label lgtm'))
            if reviewed:
                state = 'waiting'
        elif state == 'waiting' and action == 'push':
            state = 'needs review'
    return state
def calculate_attention(distilled_events, payload):
    '''
    Given information about an issue, determine who should look at it
    next and why. Returns {username: reason}; later reasons overwrite
    earlier ones for the same user.
    '''
    author = payload['author']
    attn = {}

    tests_failing = any(state == 'failure'
                        for state, _url, _desc
                        in payload.get('status', {}).values())
    if tests_failing:
        attn[author] = 'fix tests'

    for assignee in payload['assignees']:
        assignee_state = get_assignee_state(assignee, distilled_events)
        if assignee_state != 'waiting':
            attn[assignee] = assignee_state

    author_state = get_author_state(author, distilled_events)
    if author_state != 'waiting':
        attn[author] = author_state

    if payload.get('needs_rebase'):
        attn[author] = 'needs rebase'
    if 'release-note-label-needed' in payload['labels']:
        attn[author] = 'needs release-note label'

    return attn
| |
"""Constants for the Xiaomi Miio component."""
DOMAIN = "xiaomi_miio"

# Config flow
CONF_FLOW_TYPE = "config_flow_device"
CONF_GATEWAY = "gateway"
CONF_DEVICE = "device"
CONF_MODEL = "model"
CONF_MAC = "mac"
CONF_CLOUD_USERNAME = "cloud_username"
CONF_CLOUD_PASSWORD = "cloud_password"
CONF_CLOUD_COUNTRY = "cloud_country"
CONF_MANUAL = "manual"

# Options flow
CONF_CLOUD_SUBDEVICES = "cloud_subdevices"

# Keys used in hass.data entries
KEY_COORDINATOR = "coordinator"
KEY_DEVICE = "device"

# Attributes
ATTR_AVAILABLE = "available"

# Status: response payload returned by a successful miio command
SUCCESS = ["ok"]

# Cloud
SERVER_COUNTRY_CODES = ["cn", "de", "i2", "ru", "sg", "us"]
DEFAULT_CLOUD_COUNTRY = "cn"

# Fan Models (miio model identifier strings)
MODEL_AIRPURIFIER_2H = "zhimi.airpurifier.mc2"
MODEL_AIRPURIFIER_2S = "zhimi.airpurifier.mc1"
MODEL_AIRPURIFIER_3 = "zhimi.airpurifier.ma4"
MODEL_AIRPURIFIER_3C = "zhimi.airpurifier.mb4"
MODEL_AIRPURIFIER_3H = "zhimi.airpurifier.mb3"
MODEL_AIRPURIFIER_M1 = "zhimi.airpurifier.m1"
MODEL_AIRPURIFIER_M2 = "zhimi.airpurifier.m2"
MODEL_AIRPURIFIER_MA1 = "zhimi.airpurifier.ma1"
MODEL_AIRPURIFIER_MA2 = "zhimi.airpurifier.ma2"
MODEL_AIRPURIFIER_PRO = "zhimi.airpurifier.v6"
MODEL_AIRPURIFIER_PROH = "zhimi.airpurifier.va1"
MODEL_AIRPURIFIER_PRO_V7 = "zhimi.airpurifier.v7"
MODEL_AIRPURIFIER_SA1 = "zhimi.airpurifier.sa1"
MODEL_AIRPURIFIER_SA2 = "zhimi.airpurifier.sa2"
MODEL_AIRPURIFIER_V1 = "zhimi.airpurifier.v1"
MODEL_AIRPURIFIER_V2 = "zhimi.airpurifier.v2"
MODEL_AIRPURIFIER_V3 = "zhimi.airpurifier.v3"
MODEL_AIRPURIFIER_V5 = "zhimi.airpurifier.v5"
MODEL_AIRHUMIDIFIER_V1 = "zhimi.humidifier.v1"
MODEL_AIRHUMIDIFIER_CA1 = "zhimi.humidifier.ca1"
MODEL_AIRHUMIDIFIER_CA4 = "zhimi.humidifier.ca4"
MODEL_AIRHUMIDIFIER_CB1 = "zhimi.humidifier.cb1"
MODEL_AIRHUMIDIFIER_JSQ = "deerma.humidifier.jsq"
MODEL_AIRHUMIDIFIER_JSQ1 = "deerma.humidifier.jsq1"
MODEL_AIRHUMIDIFIER_MJJSQ = "deerma.humidifier.mjjsq"
MODEL_AIRFRESH_VA2 = "zhimi.airfresh.va2"
MODEL_FAN_P5 = "dmaker.fan.p5"
MODEL_FAN_SA1 = "zhimi.fan.sa1"
MODEL_FAN_V2 = "zhimi.fan.v2"
MODEL_FAN_V3 = "zhimi.fan.v3"
MODEL_FAN_ZA1 = "zhimi.fan.za1"
MODEL_FAN_ZA3 = "zhimi.fan.za3"
MODEL_FAN_ZA4 = "zhimi.fan.za4"

# Groupings by protocol: MIIO is the legacy protocol, MIOT the newer one.
MODELS_FAN_MIIO = [
    MODEL_FAN_P5,
    MODEL_FAN_SA1,
    MODEL_FAN_V2,
    MODEL_FAN_V3,
    MODEL_FAN_ZA1,
    MODEL_FAN_ZA3,
    MODEL_FAN_ZA4,
]
MODELS_PURIFIER_MIOT = [
    MODEL_AIRPURIFIER_3,
    MODEL_AIRPURIFIER_3C,
    MODEL_AIRPURIFIER_3H,
    MODEL_AIRPURIFIER_PROH,
]
MODELS_PURIFIER_MIIO = [
    MODEL_AIRPURIFIER_V1,
    MODEL_AIRPURIFIER_V2,
    MODEL_AIRPURIFIER_V3,
    MODEL_AIRPURIFIER_V5,
    MODEL_AIRPURIFIER_PRO,
    MODEL_AIRPURIFIER_PRO_V7,
    MODEL_AIRPURIFIER_M1,
    MODEL_AIRPURIFIER_M2,
    MODEL_AIRPURIFIER_MA1,
    MODEL_AIRPURIFIER_MA2,
    MODEL_AIRPURIFIER_SA1,
    MODEL_AIRPURIFIER_SA2,
    MODEL_AIRPURIFIER_2S,
    MODEL_AIRPURIFIER_2H,
    MODEL_AIRFRESH_VA2,
]
MODELS_HUMIDIFIER_MIIO = [
    MODEL_AIRHUMIDIFIER_V1,
    MODEL_AIRHUMIDIFIER_CA1,
    MODEL_AIRHUMIDIFIER_CB1,
]
MODELS_HUMIDIFIER_MIOT = [MODEL_AIRHUMIDIFIER_CA4]
MODELS_HUMIDIFIER_MJJSQ = [
    MODEL_AIRHUMIDIFIER_JSQ,
    MODEL_AIRHUMIDIFIER_JSQ1,
    MODEL_AIRHUMIDIFIER_MJJSQ,
]

# AirQuality Models
MODEL_AIRQUALITYMONITOR_V1 = "zhimi.airmonitor.v1"
MODEL_AIRQUALITYMONITOR_B1 = "cgllc.airmonitor.b1"
MODEL_AIRQUALITYMONITOR_S1 = "cgllc.airmonitor.s1"
MODEL_AIRQUALITYMONITOR_CGDN1 = "cgllc.airm.cgdn1"

# Light Models
MODELS_LIGHT_EYECARE = ["philips.light.sread1"]
MODELS_LIGHT_CEILING = ["philips.light.ceiling", "philips.light.zyceiling"]
MODELS_LIGHT_MOON = ["philips.light.moonlight"]
MODELS_LIGHT_BULB = [
    "philips.light.bulb",
    "philips.light.candle",
    "philips.light.candle2",
    "philips.light.downlight",
]
MODELS_LIGHT_MONO = ["philips.light.mono1"]

# Model lists (per-platform aggregates)
MODELS_GATEWAY = ["lumi.gateway", "lumi.acpartner"]
MODELS_SWITCH = [
    "chuangmi.plug.v1",
    "chuangmi.plug.v3",
    "chuangmi.plug.hmi208",
    "qmi.powerstrip.v1",
    "zimi.powerstrip.v2",
    "chuangmi.plug.m1",
    "chuangmi.plug.m3",
    "chuangmi.plug.v2",
    "chuangmi.plug.hmi205",
    "chuangmi.plug.hmi206",
]
MODELS_FAN = MODELS_PURIFIER_MIIO + MODELS_PURIFIER_MIOT + MODELS_FAN_MIIO
MODELS_HUMIDIFIER = (
    MODELS_HUMIDIFIER_MIOT + MODELS_HUMIDIFIER_MIIO + MODELS_HUMIDIFIER_MJJSQ
)
MODELS_LIGHT = (
    MODELS_LIGHT_EYECARE
    + MODELS_LIGHT_CEILING
    + MODELS_LIGHT_MOON
    + MODELS_LIGHT_BULB
    + MODELS_LIGHT_MONO
)
MODELS_VACUUM = ["roborock.vacuum", "rockrobo.vacuum"]
MODELS_AIR_MONITOR = [
    MODEL_AIRQUALITYMONITOR_V1,
    MODEL_AIRQUALITYMONITOR_B1,
    MODEL_AIRQUALITYMONITOR_S1,
    MODEL_AIRQUALITYMONITOR_CGDN1,
]
MODELS_ALL_DEVICES = (
    MODELS_SWITCH
    + MODELS_VACUUM
    + MODELS_AIR_MONITOR
    + MODELS_FAN
    + MODELS_HUMIDIFIER
    + MODELS_LIGHT
)
MODELS_ALL = MODELS_ALL_DEVICES + MODELS_GATEWAY

# Fan/Humidifier Services
SERVICE_SET_FAVORITE_LEVEL = "fan_set_favorite_level"
SERVICE_SET_FAN_LEVEL = "fan_set_fan_level"
SERVICE_SET_VOLUME = "fan_set_volume"
SERVICE_RESET_FILTER = "fan_reset_filter"
SERVICE_SET_EXTRA_FEATURES = "fan_set_extra_features"
SERVICE_SET_DRY = "set_dry"
SERVICE_SET_MOTOR_SPEED = "fan_set_motor_speed"

# Light Services
SERVICE_SET_SCENE = "light_set_scene"
SERVICE_SET_DELAYED_TURN_OFF = "light_set_delayed_turn_off"
SERVICE_REMINDER_ON = "light_reminder_on"
SERVICE_REMINDER_OFF = "light_reminder_off"
SERVICE_NIGHT_LIGHT_MODE_ON = "light_night_light_mode_on"
SERVICE_NIGHT_LIGHT_MODE_OFF = "light_night_light_mode_off"
SERVICE_EYECARE_MODE_ON = "light_eyecare_mode_on"
SERVICE_EYECARE_MODE_OFF = "light_eyecare_mode_off"

# Remote Services
SERVICE_LEARN = "remote_learn_command"
SERVICE_SET_REMOTE_LED_ON = "remote_set_led_on"
SERVICE_SET_REMOTE_LED_OFF = "remote_set_led_off"

# Switch Services
SERVICE_SET_WIFI_LED_ON = "switch_set_wifi_led_on"
SERVICE_SET_WIFI_LED_OFF = "switch_set_wifi_led_off"
SERVICE_SET_POWER_MODE = "switch_set_power_mode"
SERVICE_SET_POWER_PRICE = "switch_set_power_price"

# Vacuum Services
SERVICE_MOVE_REMOTE_CONTROL = "vacuum_remote_control_move"
SERVICE_MOVE_REMOTE_CONTROL_STEP = "vacuum_remote_control_move_step"
SERVICE_START_REMOTE_CONTROL = "vacuum_remote_control_start"
SERVICE_STOP_REMOTE_CONTROL = "vacuum_remote_control_stop"
SERVICE_CLEAN_SEGMENT = "vacuum_clean_segment"
SERVICE_CLEAN_ZONE = "vacuum_clean_zone"
SERVICE_GOTO = "vacuum_goto"

# Features: powers of two, OR-ed together into the per-model
# FEATURE_FLAGS_* bitmasks below.
FEATURE_SET_BUZZER = 1
FEATURE_SET_LED = 2
FEATURE_SET_CHILD_LOCK = 4
FEATURE_SET_LED_BRIGHTNESS = 8
FEATURE_SET_FAVORITE_LEVEL = 16
FEATURE_SET_AUTO_DETECT = 32
FEATURE_SET_LEARN_MODE = 64
FEATURE_SET_VOLUME = 128
FEATURE_RESET_FILTER = 256
FEATURE_SET_EXTRA_FEATURES = 512
FEATURE_SET_TARGET_HUMIDITY = 1024
FEATURE_SET_DRY = 2048
FEATURE_SET_FAN_LEVEL = 4096
FEATURE_SET_MOTOR_SPEED = 8192
FEATURE_SET_CLEAN = 16384
FEATURE_SET_OSCILLATION_ANGLE = 32768
FEATURE_SET_OSCILLATION_ANGLE_MAX_140 = 65536
FEATURE_SET_DELAY_OFF_COUNTDOWN = 131072
FEATURE_SET_LED_BRIGHTNESS_LEVEL = 262144
FEATURE_SET_FAVORITE_RPM = 524288

# Per-model feature bitmasks
FEATURE_FLAGS_AIRPURIFIER_MIIO = (
    FEATURE_SET_BUZZER
    | FEATURE_SET_CHILD_LOCK
    | FEATURE_SET_LED
    | FEATURE_SET_FAVORITE_LEVEL
    | FEATURE_SET_LEARN_MODE
    | FEATURE_RESET_FILTER
    | FEATURE_SET_EXTRA_FEATURES
)
FEATURE_FLAGS_AIRPURIFIER_MIOT = (
    FEATURE_SET_BUZZER
    | FEATURE_SET_CHILD_LOCK
    | FEATURE_SET_FAVORITE_LEVEL
    | FEATURE_SET_FAN_LEVEL
    | FEATURE_SET_LED_BRIGHTNESS
)
FEATURE_FLAGS_AIRPURIFIER_3C = (
    FEATURE_SET_BUZZER
    | FEATURE_SET_CHILD_LOCK
    | FEATURE_SET_LED_BRIGHTNESS_LEVEL
    | FEATURE_SET_FAVORITE_RPM
)
FEATURE_FLAGS_AIRPURIFIER_PRO = (
    FEATURE_SET_CHILD_LOCK
    | FEATURE_SET_LED
    | FEATURE_SET_FAVORITE_LEVEL
    | FEATURE_SET_VOLUME
)
FEATURE_FLAGS_AIRPURIFIER_PRO_V7 = (
    FEATURE_SET_CHILD_LOCK
    | FEATURE_SET_LED
    | FEATURE_SET_FAVORITE_LEVEL
    | FEATURE_SET_VOLUME
)
FEATURE_FLAGS_AIRPURIFIER_2S = (
    FEATURE_SET_BUZZER
    | FEATURE_SET_CHILD_LOCK
    | FEATURE_SET_LED
    | FEATURE_SET_FAVORITE_LEVEL
)
FEATURE_FLAGS_AIRPURIFIER_V1 = FEATURE_FLAGS_AIRPURIFIER_MIIO | FEATURE_SET_AUTO_DETECT
FEATURE_FLAGS_AIRPURIFIER_V3 = (
    FEATURE_SET_BUZZER | FEATURE_SET_CHILD_LOCK | FEATURE_SET_LED
)
FEATURE_FLAGS_AIRHUMIDIFIER = (
    FEATURE_SET_BUZZER | FEATURE_SET_CHILD_LOCK | FEATURE_SET_TARGET_HUMIDITY
)
FEATURE_FLAGS_AIRHUMIDIFIER_CA_AND_CB = FEATURE_FLAGS_AIRHUMIDIFIER | FEATURE_SET_DRY
FEATURE_FLAGS_AIRHUMIDIFIER_MJSSQ = (
    FEATURE_SET_BUZZER | FEATURE_SET_LED | FEATURE_SET_TARGET_HUMIDITY
)
FEATURE_FLAGS_AIRHUMIDIFIER_CA4 = (
    FEATURE_SET_BUZZER
    | FEATURE_SET_CHILD_LOCK
    | FEATURE_SET_TARGET_HUMIDITY
    | FEATURE_SET_DRY
    | FEATURE_SET_MOTOR_SPEED
    | FEATURE_SET_CLEAN
)
FEATURE_FLAGS_AIRFRESH = (
    FEATURE_SET_BUZZER
    | FEATURE_SET_CHILD_LOCK
    | FEATURE_SET_LED
    | FEATURE_SET_LED_BRIGHTNESS
    | FEATURE_RESET_FILTER
    | FEATURE_SET_EXTRA_FEATURES
)
FEATURE_FLAGS_FAN_P5 = (
    FEATURE_SET_BUZZER
    | FEATURE_SET_CHILD_LOCK
    | FEATURE_SET_OSCILLATION_ANGLE_MAX_140
    | FEATURE_SET_LED
    | FEATURE_SET_DELAY_OFF_COUNTDOWN
)
FEATURE_FLAGS_FAN = (
    FEATURE_SET_BUZZER
    | FEATURE_SET_CHILD_LOCK
    | FEATURE_SET_OSCILLATION_ANGLE
    | FEATURE_SET_LED_BRIGHTNESS
    | FEATURE_SET_DELAY_OFF_COUNTDOWN
)
| |
import numpy as np
from scipy import linalg
from copy import deepcopy
from ..io.constants import FIFF
from ..io.pick import pick_types, pick_info
from ..surface import get_head_surf, get_meg_helmet_surf
from ..io.proj import _has_eeg_average_ref_proj, make_projector
from ..transforms import transform_surface_to, read_trans, _find_trans
from ._make_forward import _create_coils
from ._lead_dots import (_do_self_dots, _do_surface_dots, _get_legen_table,
_get_legen_lut_fast, _get_legen_lut_accurate)
from ..parallel import check_n_jobs
from ..utils import logger, verbose
from ..fixes import partial
def _is_axial_coil(coil):
    """Return True if the coil is a magnetometer or an axial gradiometer."""
    axial_classes = (FIFF.FWD_COILC_MAG,
                     FIFF.FWD_COILC_AXIAL_GRAD,
                     FIFF.FWD_COILC_AXIAL_GRAD2)
    return coil['coil_class'] in axial_classes
def _ad_hoc_noise(coils, ch_type='meg'):
v = np.empty(len(coils))
if ch_type == 'meg':
axs = np.array([_is_axial_coil(coil) for coil in coils], dtype=bool)
v[axs] = 4e-28 # 20e-15 ** 2
v[np.logical_not(axs)] = 2.5e-25 # 5e-13 ** 2
else:
v.fill(1e-12) # 1e-6 ** 2
cov = dict(diag=True, data=v, eig=None, eigvec=None)
return cov
def _compute_mapping_matrix(fmd, info):
    """Compute the regularized surface-mapping matrix.

    Projects, whitens and SVD-truncates the self-dot matrix, inverts it,
    and sandwiches the inverse between the whitener, the projector and
    the surface dots. Also stores the truncation rank in ``fmd['nest']``.
    """
    logger.info('preparing the mapping matrix...')
    # assemble a projector and apply it to the data
    ch_names = fmd['ch_names']
    projs = info.get('projs', list())
    proj_op = make_projector(projs, ch_names)[0]
    proj_dots = np.dot(proj_op.T, np.dot(fmd['self_dots'], proj_op))

    noise_cov = fmd['noise']
    # Whiten (only diagonal covariances are supported here)
    if not noise_cov['diag']:
        raise NotImplementedError  # this shouldn't happen
    whitener = np.diag(1.0 / np.sqrt(noise_cov['data'].ravel()))
    whitened_dots = np.dot(whitener.T, np.dot(proj_dots, whitener))

    # SVD is numerically better than the eigenvalue composition even if
    # mat is supposed to be symmetric and positive definite
    uu, sing, vv = linalg.svd(whitened_dots, full_matrices=False,
                              overwrite_a=True)

    # Eigenvalue truncation: keep components until the cumulative spectrum
    # exceeds 1 - miss (the allowed "missing" fraction)
    sumk = np.cumsum(sing)
    sumk /= sumk[-1]
    fmd['nest'] = np.where(sumk > (1.0 - fmd['miss']))[0][0]
    logger.info('Truncate at %d missing %g' % (fmd['nest'], fmd['miss']))
    sing = 1.0 / sing[:fmd['nest']]

    # Put the inverse together
    logger.info('Put the inverse together...')
    inv = np.dot(uu[:, :fmd['nest']] * sing, vv[:fmd['nest']]).T

    # Sandwich with the whitener
    inv_whitened = np.dot(whitener.T, np.dot(inv, whitener))

    # Take into account that the lead fields used to compute
    # d->surface_dots were unprojected
    inv_whitened_proj = (np.dot(inv_whitened.T, proj_op)).T

    # Finally sandwich in the selection matrix
    # This one picks up the correct lead field projection
    mapping_mat = np.dot(fmd['surface_dots'], inv_whitened_proj)

    # Optionally apply the average electrode reference to the final field map
    if fmd['kind'] == 'eeg':
        if _has_eeg_average_ref_proj(projs):
            logger.info('The map will have average electrode reference')
            mapping_mat -= np.mean(mapping_mat, axis=0)[np.newaxis, :]
    return mapping_mat
@verbose
def _make_surface_mapping(info, surf, ch_type='meg', trans=None, mode='fast',
                          n_jobs=1, verbose=None):
    """Re-map M/EEG data to a surface

    Parameters
    ----------
    info : instance of io.meas_info.Info
        Measurement info.
    surf : dict
        The surface to map the data to. The required fields are `'rr'`,
        `'nn'`, and `'coord_frame'`. Must be in head coordinates.
    ch_type : str
        Must be either `'meg'` or `'eeg'`, determines the type of field.
    trans : None | dict
        If None, no transformation applied. Should be a Head<->MRI
        transformation.
    mode : str
        Either `'accurate'` or `'fast'`, determines the quality of the
        Legendre polynomial expansion used. `'fast'` should be sufficient
        for most applications.
    n_jobs : int
        Number of jobs to run in parallel (requires joblib package).
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    fmd : dict
        Field mapping data. Contains the mapping matrix under the
        ``'data'`` key (a n_vertices x n_sensors array that remaps the
        MEG or EEG data as ``new_data = np.dot(fmd['data'], data)``),
        plus ``kind``, ``surf``, ``ch_names``, ``coils``, ``origin``,
        ``noise`` and ``nest``.
    """
    # Validate the surface and the requested mode before doing any work.
    if not all([key in surf for key in ['rr', 'nn']]):
        raise KeyError('surf must have both "rr" and "nn"')
    if 'coord_frame' not in surf:
        raise KeyError('The surface coordinate frame must be specified '
                       'in surf["coord_frame"]')
    if mode not in ['accurate', 'fast']:
        raise ValueError('mode must be "accurate" or "fast", not "%s"' % mode)

    # deal with coordinate frames here -- always go to "head" (easiest)
    if surf['coord_frame'] == FIFF.FIFFV_COORD_MRI:
        if trans is None or FIFF.FIFFV_COORD_MRI not in [trans['to'],
                                                         trans['from']]:
            raise ValueError('trans must be a Head<->MRI transform if the '
                             'surface is not in head coordinates.')
        surf = transform_surface_to(deepcopy(surf), 'head', trans)
    n_jobs = check_n_jobs(n_jobs)

    #
    # Step 1. Prepare the coil definitions
    #         Do the dot products, assume surf in head coords
    #
    if ch_type not in ('meg', 'eeg'):
        raise ValueError('unknown coil type "%s"' % ch_type)
    if ch_type == 'meg':
        picks = pick_types(info, meg=True, eeg=False, ref_meg=False)
        logger.info('Prepare MEG mapping...')
    else:
        picks = pick_types(info, meg=False, eeg=True, ref_meg=False)
        logger.info('Prepare EEG mapping...')
    if len(picks) == 0:
        raise RuntimeError('cannot map, no channels found')
    chs = pick_info(info, picks)['chs']

    # create coil defs in head coordinates
    if ch_type == 'meg':
        # Put them in head coordinates
        coils = _create_coils(chs, FIFF.FWD_COIL_ACCURACY_NORMAL,
                              info['dev_head_t'], coil_type='meg')[0]
        type_str = 'coils'
        miss = 1e-4  # Smoothing criterion for MEG
    else:  # EEG
        coils = _create_coils(chs, coil_type='eeg')[0]
        type_str = 'electrodes'
        miss = 1e-3  # Smoothing criterion for EEG

    #
    # Step 2. Calculate the dot products
    #
    # Sphere model origin (head coords, meters) and integration radius.
    my_origin = np.array([0.0, 0.0, 0.04])
    int_rad = 0.06
    noise = _ad_hoc_noise(coils, ch_type)
    if mode == 'fast':
        # Use 50 coefficients with nearest-neighbor interpolation
        lut, n_fact = _get_legen_table(ch_type, False, 50)
        lut_fun = partial(_get_legen_lut_fast, lut=lut)
    else:  # 'accurate'
        # Use 100 coefficients with linear interpolation
        lut, n_fact = _get_legen_table(ch_type, False, 100)
        lut_fun = partial(_get_legen_lut_accurate, lut=lut)

    logger.info('Computing dot products for %i %s...' % (len(coils), type_str))
    self_dots = _do_self_dots(int_rad, False, coils, my_origin, ch_type,
                              lut_fun, n_fact, n_jobs)
    sel = np.arange(len(surf['rr']))  # eventually we should do sub-selection
    logger.info('Computing dot products for %i surface locations...'
                % len(sel))
    surface_dots = _do_surface_dots(int_rad, False, coils, surf, sel,
                                    my_origin, ch_type, lut_fun, n_fact,
                                    n_jobs)

    #
    # Step 4. Return the result
    #
    ch_names = [c['ch_name'] for c in chs]
    fmd = dict(kind=ch_type, surf=surf, ch_names=ch_names, coils=coils,
               origin=my_origin, noise=noise, self_dots=self_dots,
               surface_dots=surface_dots, int_rad=int_rad, miss=miss)
    logger.info('Field mapping data ready')

    fmd['data'] = _compute_mapping_matrix(fmd, info)

    # Remove some unecessary fields (only needed to build the matrix)
    del fmd['self_dots']
    del fmd['surface_dots']
    del fmd['int_rad']
    del fmd['miss']
    return fmd
def make_field_map(evoked, trans_fname='auto', subject=None, subjects_dir=None,
                   ch_type=None, mode='fast', n_jobs=1):
    """Compute surface maps used for field display in 3D

    Parameters
    ----------
    evoked : Evoked | Epochs | Raw
        The measurement file. Need to have info attribute.
    trans_fname : str | 'auto' | None
        The full path to the `*-trans.fif` file produced during
        coregistration. If present or found using 'auto'
        the maps will be in MRI coordinates.
        If None, map for EEG data will not be available.
    subject : str | None
        The subject name corresponding to FreeSurfer environment
        variable SUBJECT. If None, map for EEG data will not be available.
    subjects_dir : str
        The path to the freesurfer subjects reconstructions.
        It corresponds to Freesurfer environment variable SUBJECTS_DIR.
    ch_type : None | 'eeg' | 'meg'
        If None, a map for each available channel type will be returned.
        Else only the specified type will be used.
    mode : str
        Either `'accurate'` or `'fast'`, determines the quality of the
        Legendre polynomial expansion used. `'fast'` should be sufficient
        for most applications.
    n_jobs : int
        The number of jobs to run in parallel.

    Returns
    -------
    surf_maps : list
        The surface maps to be used for field plots. The list contains
        separate ones for MEG and EEG (if both MEG and EEG are present).
    """
    info = evoked.info

    # Determine which channel types to map.
    if ch_type is None:
        types = [t for t in ['eeg', 'meg'] if t in evoked]
    else:
        if ch_type not in ['eeg', 'meg']:
            raise ValueError("ch_type should be 'eeg' or 'meg' (got %s)"
                             % ch_type)
        types = [ch_type]

    if trans_fname == 'auto':
        # let's try to do this in MRI coordinates so they're easy to plot
        trans_fname = _find_trans(subject, subjects_dir)

    # EEG mapping needs the head surface, which requires a trans file.
    if 'eeg' in types and trans_fname is None:
        logger.info('No trans file available. EEG data ignored.')
        types.remove('eeg')

    if len(types) == 0:
        raise RuntimeError('No data available for mapping.')

    trans = None
    if trans_fname is not None:
        trans = read_trans(trans_fname)

    # Pick the target surface for each channel type.
    surfs = []
    for this_type in types:
        if this_type == 'meg':
            surf = get_meg_helmet_surf(info, trans)
        else:
            surf = get_head_surf(subject, subjects_dir=subjects_dir)
        surfs.append(surf)

    surf_maps = list()
    for this_type, this_surf in zip(types, surfs):
        this_map = _make_surface_mapping(evoked.info, this_surf, this_type,
                                         trans, n_jobs=n_jobs)
        this_map['surf'] = this_surf  # XXX : a bit weird...
        surf_maps.append(this_map)

    return surf_maps
| |
from __future__ import absolute_import, division, print_function
from itertools import product
from math import ceil
from numbers import Number
from operator import getitem, add, itemgetter
import numpy as np
from toolz import merge, accumulate, pluck, memoize
from ..base import tokenize
from ..compatibility import long
colon = slice(None, None, None)
def sanitize_index(ind):
    """ Sanitize the elements for indexing along one axis

    >>> sanitize_index([2, 3, 5])
    [2, 3, 5]
    >>> sanitize_index([True, False, True, False])
    [0, 2]
    >>> sanitize_index(np.array([1, 2, 3]))
    [1, 2, 3]
    >>> sanitize_index(np.array([False, True, True]))
    [1, 2]
    >>> type(sanitize_index(np.int32(0)))
    <type 'int'>
    >>> sanitize_index(1.0)
    1
    >>> sanitize_index(0.5)
    Traceback (most recent call last):
       ...
    IndexError: Bad index.  Must be integer-like: 0.5
    """
    # Scalars: coerce to a plain int, rejecting non-integral floats.
    if isinstance(ind, Number):
        ind2 = int(ind)
        if ind2 != ind:
            raise IndexError("Bad index.  Must be integer-like: %s" % ind)
        else:
            return ind2
    # ndarray-likes: convert to plain Python containers first.
    if hasattr(ind, 'tolist'):
        ind = ind.tolist()
    # Boolean masks become the list of True positions.
    if isinstance(ind, list) and ind and isinstance(ind[0], bool):
        ind = [a for a, b in enumerate(ind) if b]
        return ind
    if isinstance(ind, list):
        return [sanitize_index(i) for i in ind]
    if isinstance(ind, slice):
        return slice(sanitize_index(ind.start),
                     sanitize_index(ind.stop),
                     sanitize_index(ind.step))
    if ind is None:
        return ind
    try:
        return sanitize_index(np.array(ind).tolist())
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # propagate instead of being reported as an indexing error.
        raise TypeError("Invalid index type", type(ind), ind)
def slice_array(out_name, in_name, blockdims, index):
    """
    Master function for array slicing

    This function makes a new dask that slices blocks along every
    dimension and aggregates (via cartesian product) each dimension's
    slices so that the resulting block slices give the same results
    as the original slice on the original structure

    Index must be a tuple.  It may contain the following types

        int, slice, list (at most one list), None

    Parameters
    ----------
    in_name - string
      This is the dask variable name that will be used as input
    out_name - string
      This is the dask variable output name
    blockshape - iterable of integers
    index - iterable of integers, slices, lists, or None

    Returns
    -------
    Dict where the keys are tuples of

        (out_name, dim_index[, dim_index[, ...]])

    and the values are

        (function, (in_name,  dim_index, dim_index, ...),
                   (slice(...), [slice()[,...]])

    Also new blockdims with shapes of each block

        ((10, 10, 10, 10), (20, 20))

    Examples
    --------
    >>> dsk, blockdims = slice_array('y', 'x', [(20, 20, 20, 20, 20)],
    ...                              (slice(10, 35),))  # doctest: +SKIP
    >>> dsk  # doctest: +SKIP
    {('y', 0): (getitem, ('x', 0), (slice(10, 20),)),
     ('y', 1): (getitem, ('x', 1), (slice(0, 15),))}
    >>> blockdims  # doctest: +SKIP
    ((10, 15),)

    See Also
    --------

    This function works by successively unwrapping cases and passing down
    through a sequence of functions.

    slice_with_newaxis - handle None/newaxis case
    slice_wrap_lists - handle fancy indexing with lists
    slice_slices_and_integers - handle everything else
    """
    index = replace_ellipsis(len(blockdims), index)
    index = tuple(map(sanitize_index, index))
    blockdims = tuple(map(tuple, blockdims))

    # x[:, :, :] - Punt and return old value
    # NOTE(review): the generator variable deliberately shadows `index`;
    # each element is compared against a full slice.
    if all(index == slice(None, None, None) for index in index):
        suffixes = product(*[range(len(bd)) for bd in blockdims])
        # Identity task per block: alias output keys to input keys.
        dsk = dict(((out_name,) + s, (in_name,) + s)
                   for s in suffixes)
        return dsk, blockdims

    # Add in missing colons at the end as needed.  x[5] -> x[5, :, :]
    # (None entries add dimensions, so they don't count toward coverage.)
    missing = len(blockdims) - (len(index) - index.count(None))
    index += (slice(None, None, None),) * missing

    # Pass down to next function
    dsk_out, bd_out = slice_with_newaxes(out_name, in_name, blockdims, index)

    bd_out = tuple(map(tuple, bd_out))
    return dsk_out, bd_out
def slice_with_newaxes(out_name, in_name, blockdims, index):
    """
    Handle indexing with Nones

    Strips out Nones then hands off to slice_wrap_lists; afterwards it
    re-inserts the new axes into the resulting keys, task slices, and
    blockdims.
    """
    # Strip Nones from index
    index2 = tuple([ind for ind in index if ind is not None])
    where_none = [i for i, ind in enumerate(index) if ind is None]
    where_none_orig = list(where_none)
    # Integer indices collapse a dimension, so shift the insertion
    # positions in the *output* key space accordingly.
    for i, x in enumerate(where_none):
        n = sum(isinstance(ind, int) for ind in index[:x])
        if n:
            where_none[i] -= n

    # Pass down and do work
    dsk, blockdims2 = slice_wrap_lists(out_name, in_name, blockdims, index2)

    if where_none:
        expand = expander(where_none)
        expand_orig = expander(where_none_orig)

        # Insert ",0" into the key:  ('x', 2, 3) -> ('x', 0, 2, 0, 3)
        # and ",None" into each task's slice tuple at the original spots.
        dsk2 = {(out_name,) + expand(k[1:], 0):
                (v[:2] + (expand_orig(v[2], None),))
                for k, v in dsk.items()
                if k[0] == out_name}

        # Add back intermediate parts of the dask that weren't the output
        dsk3 = merge(dsk2, {k: v for k, v in dsk.items() if k[0] != out_name})

        # Insert (1,) into blockdims:  ((2, 2), (3, 3)) -> ((2, 2), (1,), (3, 3))
        blockdims3 = expand(blockdims2, (1,))

        return dsk3, blockdims3
    else:
        return dsk, blockdims2
def slice_wrap_lists(out_name, in_name, blockdims, index):
    """
    Fancy indexing along blocked array dasks

    Handles index of type list.  Calls slice_slices_and_integers for the rest

    See Also
    --------

    take - handle slicing with lists ("fancy" indexing)
    slice_slices_and_integers - handle slicing with slices and integers
    """
    shape = tuple(map(sum, blockdims))
    assert all(isinstance(i, (slice, list, int, long)) for i in index)
    if not len(blockdims) == len(index):
        raise IndexError("Too many indices for array")

    # Validate each index component against its dimension's extent.
    for bd, i in zip(blockdims, index):
        check_index(i, sum(bd))

    # Change indices like -1 to 9
    index2 = posify_index(shape, index)

    # Do we have more than one list in the index?
    where_list = [i for i, ind in enumerate(index) if isinstance(ind, list)]
    if len(where_list) > 1:
        raise NotImplementedError("Don't yet support nd fancy indexing")

    # No lists, hooray! just use slice_slices_and_integers
    if not where_list:
        return slice_slices_and_integers(out_name, in_name, blockdims, index2)

    # Replace all lists with full slices  [3, 1, 0] -> slice(None, None, None)
    index_without_list = tuple(slice(None, None, None)
                               if isinstance(i, list) else i
                               for i in index2)

    # lists and full slices.  Just use take
    if all(isinstance(i, list) or i == slice(None, None, None)
           for i in index2):
        axis = where_list[0]
        blockdims2, dsk3 = take(out_name, in_name, blockdims,
                                index2[where_list[0]], axis=axis)
    # Mixed case. Both slices/integers and lists. slice/integer then take
    else:
        # Do first pass without lists; tokenize gives a deterministic
        # intermediate name for this sub-graph.
        tmp = 'slice-' + tokenize((out_name, in_name, blockdims, index))
        dsk, blockdims2 = slice_slices_and_integers(tmp, in_name, blockdims, index_without_list)

        # After collapsing some axes due to int indices, adjust axis parameter
        axis = where_list[0]
        axis2 = axis - sum(1 for i, ind in enumerate(index2)
                           if i < axis and isinstance(ind, (int, long)))

        # Do work
        blockdims2, dsk2 = take(out_name, tmp, blockdims2, index2[axis],
                                axis=axis2)
        dsk3 = merge(dsk, dsk2)

    return dsk3, blockdims2
def slice_slices_and_integers(out_name, in_name, blockdims, index):
    """
    Dask array indexing with slices and integers

    Builds one getitem task per output block, pairing input block keys
    with the per-block slices computed by _slice_1d.

    See Also
    --------

    _slice_1d
    """
    shape = tuple(map(sum, blockdims))

    assert all(isinstance(ind, (slice, int, long)) for ind in index)
    assert len(index) == len(blockdims)

    # Get a list (for each dimension) of dicts{blocknum: slice()}
    block_slices = list(map(_slice_1d, shape, blockdims, index))
    sorted_block_slices = [sorted(i.items()) for i in block_slices]

    # (in_name, 1, 1, 2), (in_name, 1, 1, 4), (in_name, 2, 1, 2), ...
    in_names = list(product([in_name], *[pluck(0, s) for s in sorted_block_slices]))

    # (out_name, 0, 0, 0), (out_name, 0, 0, 1), (out_name, 0, 1, 0), ...
    # Negative-step slices walk the blocks in reverse order; integer
    # indices collapse their dimension out of the key entirely.
    out_names = list(product([out_name],
                             *[range(len(d))[::-1] if i.step and i.step < 0 else range(len(d))
                               for d, i in zip(block_slices, index)
                               if not isinstance(i, (int, long))]))

    all_slices = list(product(*[pluck(1, s) for s in sorted_block_slices]))

    dsk_out = {out_name: (getitem, in_name, slices)
               for out_name, in_name, slices
               in zip(out_names, in_names, all_slices)}

    new_blockdims = [new_blockdim(d, db, i)
                     for d, i, db in zip(shape, index, blockdims)
                     if not isinstance(i, (int, long))]

    return dsk_out, new_blockdims
def _slice_1d(dim_shape, lengths, index):
    """Returns a dict of {blocknum: slice}

    This function figures out where each slice should start in each
    block for a single dimension. If the slice won't return any elements
    in the block, that block will not be in the output.

    Parameters
    ----------
    dim_shape - the number of elements in this dimension.
      This should be a positive, non-zero integer
    lengths - the number of elements per block in this dimension
      Each entry should be a positive, non-zero integer
    index - a description of the elements in this dimension that we want
      This might be an integer, a slice(), or an Ellipsis

    Returns
    -------
    dictionary where the keys are the integer index of the blocks that
    should be sliced and the values are the slices

    Examples
    --------
    Trivial slicing
    >>> _slice_1d(100, [60, 40], slice(None, None, None))
    {0: slice(None, None, None), 1: slice(None, None, None)}
    100 length array cut into length 20 pieces, slice 0:35
    >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(0, 35))
    {0: slice(None, None, None), 1: slice(0, 15, 1)}
    Support irregular blocks and various slices
    >>> _slice_1d(100, [20, 10, 10, 10, 25, 25], slice(10, 35))
    {0: slice(10, 20, 1), 1: slice(None, None, None), 2: slice(0, 5, 1)}
    Support step sizes
    >>> _slice_1d(100, [15, 14, 13], slice(10, 41, 3))
    {0: slice(10, 15, 3), 1: slice(1, 14, 3), 2: slice(2, 12, 3)}
    >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(0, 100, 40))  # step > blocksize
    {0: slice(0, 20, 40), 2: slice(0, 20, 40), 4: slice(0, 20, 40)}
    Also support indexing single elements
    >>> _slice_1d(100, [20, 20, 20, 20, 20], 25)
    {1: 5}
    And negative slicing
    >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(100, 0, -3))
    {0: slice(-2, -20, -3), 1: slice(-1, -21, -3), 2: slice(-3, -21, -3), 3: slice(-2, -21, -3), 4: slice(-1, -21, -3)}
    >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(100, 12, -3))
    {0: slice(-2, -8, -3), 1: slice(-1, -21, -3), 2: slice(-3, -21, -3), 3: slice(-2, -21, -3), 4: slice(-1, -21, -3)}
    >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(100, -12, -3))
    {4: slice(-1, -12, -3)}
    """
    if isinstance(index, (int, long)):
        # Integer index: walk blocks until the one containing it is found,
        # returning {block: offset-within-block}.
        i = 0
        ind = index
        lens = list(lengths)
        while ind >= lens[0]:
            i += 1
            ind -= lens.pop(0)
        return {i: ind}
    assert isinstance(index, slice)
    # Full slice: every block is taken whole.
    if index == colon:
        return {k: colon for k in range(len(lengths))}
    step = index.step or 1
    if step > 0:
        start = index.start or 0
        stop = index.stop if index.stop is not None else dim_shape
    else:
        # Negative step: default start is the last element; a missing stop is
        # encoded as -(dim_shape + 1), i.e. one before the "left" end.
        start = index.start or dim_shape - 1
        start = dim_shape - 1 if start >= dim_shape else start
        stop = -(dim_shape + 1) if index.stop is None else index.stop
    # posify start and stop
    if start < 0:
        start += dim_shape
    if stop < 0:
        stop += dim_shape
    d = dict()
    if step > 0:
        for i, length in enumerate(lengths):
            if start < length and stop > 0:
                # Slice begins in this block; clip the stop to the block edge.
                d[i] = slice(start, min(stop, length), step)
                # Carry the step phase into the next block.
                start = (start - length) % step
            else:
                start = start - length
            stop -= length
    else:
        rstart = start  # running start
        # Cumulative block boundaries; assumed toolz-style
        # ``accumulate(op, seq)`` giving running sums — TODO confirm import.
        chunk_boundaries = list(accumulate(add, lengths))
        # Walk blocks right-to-left, emitting negative-indexed slices.
        for i, chunk_stop in reversed(list(enumerate(chunk_boundaries))):
            # create a chunk start and stop
            if i == 0:
                chunk_start = 0
            else:
                chunk_start = chunk_boundaries[i - 1]
            # if our slice is in this chunk
            if (chunk_start <= rstart < chunk_stop) and (rstart > stop):
                # Express start/stop relative to the block's right edge.
                d[i] = slice(rstart - chunk_stop,
                             max(chunk_start - chunk_stop - 1,
                                 stop - chunk_stop),
                             step)
                # compute the next running start point,
                offset = (rstart - (chunk_start - 1)) % step
                rstart = chunk_start + offset - 1
    # replace 0:20:1 with : if appropriate
    for k, v in d.items():
        if v == slice(0, lengths[k], 1):
            d[k] = slice(None, None, None)
    if not d:  # special case x[:0]
        d[0] = slice(0, 0, 1)
    return d
def partition_by_size(sizes, seq):
    """
    Split the sorted values of *seq* into per-block lists of block-local
    offsets, where block ``k`` covers ``sizes[k]`` consecutive elements.

    >>> partition_by_size([10, 20, 10], [1, 5, 9, 12, 29, 35])
    [[1, 5, 9], [2, 19], [5]]
    """
    values = np.array(seq)
    # Right edge of each block in global coordinates.
    boundaries = np.cumsum(sizes)
    # Where each block's slice of `values` ends (values are assumed sorted).
    splits = [0] + np.searchsorted(values, boundaries).tolist()
    # Left edge of each block, used to convert global offsets to local ones.
    starts = [0] + boundaries.tolist()
    out = []
    for k in range(len(splits) - 1):
        chunk = values[splits[k]:splits[k + 1]] - starts[k]
        out.append(chunk.tolist())
    return out
def issorted(seq):
    """ Is sequence sorted?

    >>> issorted([1, 2, 3])
    True
    >>> issorted([3, 1, 2])
    False
    """
    if not seq:
        return True
    # Sorted means no element is smaller than its predecessor.  Written as
    # ``not (b < a)`` so that, like the original scan, only __lt__ is needed.
    return all(not (b < a) for a, b in zip(seq, seq[1:]))
def take_sorted(outname, inname, blockdims, index, axis=0):
    """ Index array with sorted list index

    Forms a dask for the following case
        x[:, [1, 3, 5, 10], ...]
    where the index, ``[1, 3, 5, 10]`` is sorted in non-decreasing order.
    Because the index is sorted, each input block maps to at most one output
    block and the multi-block structure along ``axis`` is preserved.

    >>> blockdims, dsk = take('y', 'x', [(20, 20, 20, 20)], [1, 3, 5, 47], axis=0)
    >>> blockdims
    ((3, 1),)
    >>> dsk  # doctest: +SKIP
    {('y', 0): (getitem, ('x', 0), ([1, 3, 5],)),
     ('y', 1): (getitem, ('x', 2), ([7],))}

    See Also
    --------
    take - calls this function
    """
    sizes = blockdims[axis]  # the blocksizes on the axis that we care about
    # Bucket indices by input block; values become block-local offsets.
    index_lists = partition_by_size(sizes, sorted(index))
    # Input blocks that contribute at least one element ...
    where_index = [i for i, il in enumerate(index_lists) if il]
    # ... and their (non-empty) per-block index lists, in the same order.
    index_lists = [il for il in index_lists if il]
    dims = [range(len(bd)) for bd in blockdims]
    # Output blocks along ``axis`` are renumbered 0..len(where_index)-1.
    indims = list(dims)
    indims[axis] = list(range(len(where_index)))
    keys = list(product([outname], *indims))
    # Input keys keep the original block numbers along ``axis``.
    # NOTE(review): the names read swapped — ``indims`` builds the *output*
    # keys and ``outdims`` the *input* keys — but the resulting graph pairs
    # up consistently via zip below.
    outdims = list(dims)
    outdims[axis] = where_index
    # One slice tuple per output block: full slices everywhere except the
    # block-local index list along ``axis``.
    slices = [[colon] * len(bd) for bd in blockdims]
    slices[axis] = index_lists
    slices = list(product(*slices))
    inkeys = list(product([inname], *outdims))
    values = [(getitem, inkey, slc) for inkey, slc in zip(inkeys, slices)]
    blockdims2 = list(blockdims)
    blockdims2[axis] = tuple(map(len, index_lists))
    return tuple(blockdims2), dict(zip(keys, values))
def take(outname, inname, blockdims, index, axis=0):
    """ Index array with an iterable of index

    Handles a single index by a single list
    Mimics ``np.take``

    >>> blockdims, dsk = take('y', 'x', [(20, 20, 20, 20)], [5, 1, 47, 3], axis=0)
    >>> blockdims
    ((4,),)
    >>> dsk  # doctest: +SKIP
    {('y', 0): (getitem, (np.concatenate, [(getitem, ('x', 0), ([1, 3, 5],)),
                                           (getitem, ('x', 2), ([7],))],
                                          0),
                (2, 0, 4, 1))}

    When list is sorted we retain original block structure

    >>> blockdims, dsk = take('y', 'x', [(20, 20, 20, 20)], [1, 3, 5, 47], axis=0)
    >>> blockdims
    ((3, 1),)
    >>> dsk  # doctest: +SKIP
    {('y', 0): (getitem, ('x', 0), ([1, 3, 5],)),
     ('y', 2): (getitem, ('x', 2), ([7],))}
    """
    # Fast path: a sorted index can be taken block-by-block without a global
    # concatenate, preserving the multi-block structure along ``axis``.
    if issorted(index):
        return take_sorted(outname, inname, blockdims, index, axis)
    n = len(blockdims)
    sizes = blockdims[axis]  # the blocksizes on the axis that we care about
    # Bucket the sorted indices by input block (block-local offsets).
    index_lists = partition_by_size(sizes, sorted(index))
    # The output has a single block along ``axis``; other axes keep theirs.
    dims = [[0] if axis == i else list(range(len(bd)))
            for i, bd in enumerate(blockdims)]
    keys = list(product([outname], *dims))
    # rev_index re-orders the sorted gather back to the requested order.
    # NOTE(review): list.index returns the first match, so duplicates in
    # ``index`` all resolve to the same gathered position.
    rev_index = list(map(sorted(index).index, index))
    # For every output block: take the selected rows from each contributing
    # input block, concatenate along ``axis``, then reorder with rev_index.
    vals = [(getitem, (np.concatenate,
                       [(getitem, ((inname, ) + d[:axis] + (i, ) + d[axis + 1:]),
                         ((colon, ) * axis + (IL, ) + (colon, ) * (n - axis - 1)))
                        for i, IL in enumerate(index_lists) if IL], axis),
             ((colon, ) * axis + (rev_index, ) + (colon, ) * (n - axis - 1)))
            for d in product(*dims)]
    blockdims2 = list(blockdims)
    blockdims2[axis] = (len(index), )
    return tuple(blockdims2), dict(zip(keys, vals))
def posify_index(shape, ind):
    """ Flip negative indices around to positive ones

    >>> posify_index(10, 3)
    3
    >>> posify_index(10, -3)
    7
    >>> posify_index(10, [3, -3])
    [3, 7]
    >>> posify_index((10, 20), (3, -3))
    (3, 17)
    >>> posify_index((10, 20), (3, [3, 4, -3]))
    (3, [3, 4, 17])
    """
    # A tuple pairs one index per axis: recurse element-wise.
    if isinstance(ind, tuple):
        return tuple(posify_index(s, i) for s, i in zip(shape, ind))
    if isinstance(ind, (int, long)):
        return ind + shape if ind < 0 else ind
    if isinstance(ind, list):
        return [i + shape if i < 0 else i for i in ind]
    # Slices and anything else pass through untouched.
    return ind
def insert_many(seq, where, val):
    """ Insert value at many locations in sequence

    >>> insert_many(['a', 'b', 'c'], [0, 2], 'z')
    ('z', 'a', 'z', 'b', 'c')
    """
    pending = list(seq)
    # The output is len(seq) + len(where) long: insertion points emit *val*,
    # every other position consumes the next original element.
    total = len(pending) + len(where)
    return tuple(val if pos in where else pending.pop(0)
                 for pos in range(total))
@memoize
def _expander(where):
    # Build (and memoize, keyed on the ``where`` tuple) a function
    # ``expand(seq, val)`` that inserts ``val`` at each position in ``where``.
    # The positional logic is baked into generated source so repeated calls
    # avoid per-call bookkeeping.
    # NOTE(review): ``memoize`` comes from an import above this chunk
    # (presumably toolz) — confirm.
    if not where:
        def expand(seq, val):
            # Nothing to insert: hand back the sequence unchanged.
            return seq
        return expand
    else:
        # Template for the generated function: a literal tuple of picked
        # elements / inserted values, followed by the untouched tail.
        decl = """def expand(seq, val):
    return ({left}) + tuple({right})
"""
        left = []
        j = 0
        # Walk positions 0..max(where): insertion points emit ``val``, the
        # rest consume successive elements of ``seq``.
        for i in range(max(where) + 1):
            if i in where:
                left.append("val, ")
            else:
                left.append("seq[%d], " % j)
                j += 1
        # Whatever remains of ``seq`` is appended verbatim.
        right = "seq[%d:]" % j
        left = "".join(left)
        decl = decl.format(**locals())
        ns = {}
        # Compile and execute the generated source; ``expand`` lands in ns.
        exec(compile(decl, "<dynamic>", "exec"), ns, ns)
        return ns['expand']
def expander(where):
    """ An optimized version of insert_many() when *where*
    is known upfront and used many times.

    >>> expander([0, 2])(['a', 'b', 'c'], 'z')
    ('z', 'a', 'z', 'b', 'c')
    """
    # Normalize to a tuple so the memoized _expander can cache on it.
    return _expander(tuple(where))
def new_blockdim(dim_shape, lengths, index):
    """
    Compute the per-block lengths along one dimension after applying *index*.

    >>> new_blockdim(100, [20, 10, 20, 10, 40], slice(0, 90, 2))
    [10, 5, 10, 5, 15]
    >>> new_blockdim(100, [20, 10, 20, 10, 40], [5, 1, 30, 22])
    [4]
    >>> new_blockdim(100, [20, 10, 20, 10, 40], slice(90, 10, -2))
    [16, 5, 10, 5, 4]
    """
    # A list index always collapses into a single output block.
    if isinstance(index, list):
        return [len(index)]
    # Integer indices drop the dimension entirely; callers filter them out.
    assert not isinstance(index, (int, long))
    # Map the slice onto each block, in block order.
    pairs = sorted(_slice_1d(dim_shape, lengths, index).items(),
                   key=itemgetter(0))
    # _slice_1d abbreviates a whole-block slice as ``:``; expand it so the
    # ceil arithmetic below has concrete start/stop/step values.
    slices = [slice(0, lengths[i], 1) if slc == slice(None, None, None) else slc
              for i, slc in pairs]
    # For negative steps the output blocks appear in reversed order.
    if isinstance(index, slice) and index.step and index.step < 0:
        slices = slices[::-1]
    # Number of selected elements per block: ceil((stop - start) / step).
    return [int(ceil((1. * slc.stop - slc.start) / slc.step)) for slc in slices]
def replace_ellipsis(n, index):
    """ Replace ... with slices, :, : ,:

    >>> replace_ellipsis(4, (3, Ellipsis, 2))
    (3, slice(None, None, None), slice(None, None, None), 2)
    >>> replace_ellipsis(2, (Ellipsis, None))
    (slice(None, None, None), slice(None, None, None), None)
    """
    # Compare with ``is`` — ``in`` / ``==`` could choke on array elements.
    ellipsis_locs = [pos for pos, item in enumerate(index) if item is Ellipsis]
    if not ellipsis_locs:
        return index
    # ``None`` entries add new axes, so they don't consume a dimension of x;
    # the Ellipsis expands to however many axes are left unaccounted for.
    newaxes = sum(item is None for item in index)
    fill = n - (len(index) - newaxes - 1)
    loc = ellipsis_locs[0]
    return index[:loc] + (slice(None, None, None),) * fill + index[loc + 1:]
def check_index(ind, dimension):
    """ Check validity of index for a given dimension

    Raises ``IndexError`` when *ind* (an integer, list of integers, or slice)
    cannot index an axis of length *dimension*.  Slices are always accepted.
    Valid integer indices run from ``-dimension`` to ``dimension - 1``
    inclusive, matching NumPy semantics.

    Examples
    --------
    >>> check_index(3, 5)
    >>> check_index(5, 5)
    Traceback (most recent call last):
    ...
    IndexError: Index is not smaller than dimension 5 >= 5
    >>> check_index(6, 5)
    Traceback (most recent call last):
    ...
    IndexError: Index is not smaller than dimension 6 >= 5
    >>> check_index(-1, 5)
    >>> check_index(-6, 5)
    Traceback (most recent call last):
    ...
    IndexError: Negative index is not greater than negative dimension -6 <= -5
    >>> check_index([1, 2], 5)
    >>> check_index([6, 3], 5)
    Traceback (most recent call last):
    ...
    IndexError: Index out of bounds 5
    >>> check_index(slice(0, 3), 5)
    """
    if isinstance(ind, list):
        x = np.array(ind)
        # Bug fix: the lower bound used to be ``x <= -dimension``, which
        # wrongly rejected the valid index ``-dimension`` and disagreed with
        # the scalar branch below (check_index(-5, 5) passed while
        # check_index([-5], 5) raised).
        if (x >= dimension).any() or (x < -dimension).any():
            raise IndexError("Index out of bounds %s" % dimension)
    elif isinstance(ind, slice):
        # Slices are clipped at evaluation time, never invalid here.
        return
    elif ind >= dimension:
        raise IndexError("Index is not smaller than dimension %d >= %d" %
                         (ind, dimension))
    elif ind < -dimension:
        msg = "Negative index is not greater than negative dimension %d <= -%d"
        raise IndexError(msg % (ind, dimension))
| |
# PyVision License
#
# Copyright (c) 2009 David S. Bolme
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither name of copyright holders nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import gzip
import numpy as np
import time
import pyvision as pv
import scipy.interpolate as it
import scipy.ndimage as nd
class RangeImage:
    '''
    This class is used to handle range images. Originally written to handle
    output from the Minolta Vivid sensors distributed with the Face Recognition
    Grand Challenge 2004

    This implementation currently can parse range images in ".abs" or ".abs.gz" format.
    Very little type checking is done during parsing so unexpected exceptions or
    unusual behavior may occur if the file is not formatted properly.

    This is a sample for the .abs file format:
    480 rows
    640 columns
    pixels (flag X Y Z):
    0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ...
    -999999.000000 -999999.000000 -999999.000000 -9999 ...
    -999999.000000 -999999.000000 -999999.000000 -9999 ...
    -999999.000000 -999999.000000 -999999.000000 -9999 ...

    Author: David S. Bolme 2009
    '''
    def __init__(self, filename):
        '''
        Read a range-data file in ".abs" or ".abs.gz" format.

        @param filename: path to the range image; the extension selects
            gzip or plain reading.
        @raise ValueError: if the filename has an unsupported extension.
        '''
        if filename[-7:] == '.abs.gz':
            # Gzip-compressed abs file.
            f = gzip.open(filename)
        elif filename[-4:] == '.abs':
            # Plain, uncompressed abs file.
            f = open(filename)
        else:
            # Bug fix: an unsupported extension used to fall through and
            # raise a confusing NameError on 'f'; fail fast instead.
            raise ValueError("Unsupported range image format: %s" % filename)
        try:
            # Header: "<rows> rows", "<cols> columns", then the format line.
            # next(f) works for both Python 2 file objects and Python 3.
            rows = int(next(f).split()[0])
            cols = int(next(f).split()[0])
            next(f)  # pixel format description, e.g. "pixels (flag X Y Z):"
            self.width = cols
            self.height = rows
            # Each of the next four lines holds rows*cols whitespace-separated
            # values.  Arrays are stored transposed: shape (width, height).
            self.flags = np.array([int(v) for v in next(f).split()]).reshape(rows, cols).transpose()
            self.x = np.array([float(v) for v in next(f).split()]).reshape(rows, cols).transpose()
            self.y = np.array([float(v) for v in next(f).split()]).reshape(rows, cols).transpose()
            self.z = np.array([float(v) for v in next(f).split()]).reshape(rows, cols).transpose()
        finally:
            # Bug fix: the file handle used to leak; always close it.
            f.close()
    def getRange(self):
        '''
        Bounding box of the valid (flagged) 3D samples.

        @returns: xmin,xmax,ymin,ymax,zmin,zmax
        '''
        # dtype=bool instead of the removed np.bool alias; same semantics.
        flags = np.array(self.flags.flatten(), dtype=bool)
        X = self.x.flatten()[flags]
        Y = self.y.flatten()[flags]
        Z = self.z.flatten()[flags]
        return min(X), max(X), min(Y), max(Y), min(Z), max(Z)
    def _coordinateImage(self, values, fill):
        '''
        Shared helper for the coordinate getters: replace missing samples in
        *values* with *fill* and wrap the result as a pv.Image.
        '''
        r, c = self.x.shape
        flags = np.array(self.flags.flatten(), dtype=bool)
        vals = values.flatten().copy()
        vals[~flags] = fill
        return pv.Image(vals.reshape(r, c))
    def getXImage(self):
        '''
        @returns: the x coordinates; missing pixels are filled with xmin.
        '''
        xmin, xmax, ymin, ymax, zmin, zmax = self.getRange()
        return self._coordinateImage(self.x, xmin)
    def getYImage(self):
        '''
        @returns: the y coordinates; missing pixels are filled with ymin.
        '''
        xmin, xmax, ymin, ymax, zmin, zmax = self.getRange()
        return self._coordinateImage(self.y, ymin)
    def getZImage(self):
        '''
        @returns: the z coordinates; missing pixels are filled with zmin.
        '''
        xmin, xmax, ymin, ymax, zmin, zmax = self.getRange()
        return self._coordinateImage(self.z, zmin)
    def getMaskImage(self):
        '''
        @returns: the missing value mask: nonzero where a valid sample exists.

        Bug fix: this used to return a copy of the Z image (identical to
        getZImage) instead of the flag mask.
        '''
        mask = np.array(self.flags, dtype=np.float64)
        return pv.Image(mask)
    def populateMissingData(self, approach="Smooth", ilog=None):
        '''
        This function is used to interpolate missing data in the image.

        @param approach: "Smooth" (median filter followed by iterative
            Gaussian smoothing) or "RBF" (radial basis functions; known to
            fail on full-size images).
        @param ilog: optional pv.ImageLog for recording intermediate results.

        NOTE(review): the smoothed result is only logged, not written back to
        self.z — confirm whether callers expect self.z to be updated.
        '''
        if approach == 'Smooth':
            # first run a median filter over the array, then smooth the result.
            xmin, xmax, ymin, ymax, zmin, zmax = self.getRange()
            mask = np.array(self.flags, dtype=bool).flatten()
            z = self.getZImage().asMatrix2D()
            median = nd.median_filter(z, size=(15, 15))
            z = z.flatten()
            median = median.flatten()
            # Fill missing samples from the median-filtered surface.
            z[mask == False] = median[mask == False]
            if ilog != None:
                ilog.log(pv.Image(median.reshape(self.width, self.height)), label="Median")
                ilog.log(pv.Image(z.reshape(self.width, self.height)), label="ZMedian")
            # Iteratively relax the filled-in values towards a smooth surface.
            for i in range(5):
                tmp = z.copy()
                smooth = nd.gaussian_filter(z.reshape(self.width, self.height), 2.0).flatten()
                z[mask == False] = smooth[mask == False]
                print("Iteration: %s %s %s" % (i, (z - tmp).max(), (z - tmp).min()))
                if ilog != None:
                    # Bug fix: these logs used to run even when ilog was None,
                    # crashing with an AttributeError.
                    # NOTE(review): both images share the label "ZSmooth%02d";
                    # presumably the second was meant to be a diff label.
                    ilog.log(pv.Image(z.reshape(self.width, self.height)), label="ZSmooth%02d" % i)
                    ilog.log(pv.Image((z - tmp).reshape(self.width, self.height)), label="ZSmooth%02d" % i)
        if approach == 'RBF':
            mask = np.array(self.flags, dtype=bool).flatten()
            # Flattened pixel-coordinate grids matching the (width, height)
            # storage layout of self.z.
            x = np.arange(self.width).reshape(self.width, 1)
            x = (x * np.ones((1, self.height))).flatten()
            y = np.arange(self.height).reshape(1, self.height)
            y = (y * np.ones((self.width, 1))).flatten()
            z = self.z.copy().flatten()
            print("Coords:")
            print(len(mask))
            print(len(x[mask]))
            print(len(y[mask]))
            print(len(z[mask]))
            # NOTE(review): this call raised errors in practice — probably
            # too many data points for scipy's Rbf.  Kept as in the original.
            it.Rbf(x[mask], y[mask], z[mask])
if __name__ == "__main__":
    # Manual smoke test: parse a sample FRGC range image and log the
    # coordinate images alongside the matching color frame.
    # NOTE(review): requires the sample files below in the working directory.
    ilog = pv.ImageLog()
    filename = "02463d562.abs.gz"
    im = pv.Image("02463d563.ppm")
    # Time the parse of the gzipped range file.
    t = time.time()
    ri = RangeImage(filename)
    t = time.time() - t
    print t
    print ri.getRange()
    ilog.log(ri.getXImage(),"X_Image")
    ilog.log(ri.getYImage(),"Y_Image")
    ilog.log(ri.getZImage(),"Z_Image")
    ilog.log(im,"Color")
    # Fill holes using the default "Smooth" approach, logging intermediates.
    ri.populateMissingData(ilog=ilog)
    ilog.show()
| |
"""Tests for parsers.py"""
import asyncio
import unittest
import unittest.mock
from aiohttp import errors
from aiohttp import parsers
class TestStreamParser(unittest.TestCase):
    """Tests for parsers.StreamParser: parser attach/detach, data feeding,
    EOF handling and exception propagation."""
    DATA = b'line1\nline2\nline3\n'
    def setUp(self):
        # Fresh private event loop per test; the global loop is unset so any
        # accidental implicit-loop usage fails loudly.
        self.lines_parser = parsers.LinesParser()
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(None)
    def tearDown(self):
        self.loop.close()
    def test_at_eof(self):
        proto = parsers.StreamParser(loop=self.loop)
        self.assertFalse(proto.at_eof())
        proto.feed_eof()
        self.assertTrue(proto.at_eof())
    def test_exception(self):
        stream = parsers.StreamParser(loop=self.loop)
        self.assertIsNone(stream.exception())
        exc = ValueError()
        stream.set_exception(exc)
        self.assertIs(stream.exception(), exc)
    def test_exception_connection_error(self):
        # ConnectionError is wrapped into a RuntimeError but chained so the
        # original cause stays reachable.
        stream = parsers.StreamParser(loop=self.loop)
        self.assertIsNone(stream.exception())
        exc = ConnectionError()
        stream.set_exception(exc)
        self.assertIsNot(stream.exception(), exc)
        self.assertIsInstance(stream.exception(), RuntimeError)
        self.assertIs(stream.exception().__cause__, exc)
        self.assertIs(stream.exception().__context__, exc)
    def test_exception_waiter(self):
        # Setting an exception on the stream propagates into the attached
        # parser's output queue.
        stream = parsers.StreamParser(loop=self.loop)
        stream._parser = self.lines_parser
        buf = stream._output = parsers.FlowControlDataQueue(
            stream, loop=self.loop)
        exc = ValueError()
        stream.set_exception(exc)
        self.assertIs(buf.exception(), exc)
    def test_feed_data(self):
        stream = parsers.StreamParser(loop=self.loop)
        stream.feed_data(self.DATA)
        self.assertEqual(self.DATA, bytes(stream._buffer))
    def test_feed_none_data(self):
        stream = parsers.StreamParser(loop=self.loop)
        stream.feed_data(None)
        self.assertEqual(b'', bytes(stream._buffer))
    def test_set_parser_unset_prev(self):
        # Attaching a second parser must first unset the previous one.
        stream = parsers.StreamParser(loop=self.loop)
        stream.set_parser(self.lines_parser)
        unset = stream.unset_parser = unittest.mock.Mock()
        stream.set_parser(self.lines_parser)
        self.assertTrue(unset.called)
    def test_set_parser_exception(self):
        stream = parsers.StreamParser(loop=self.loop)
        exc = ValueError()
        stream.set_exception(exc)
        s = stream.set_parser(self.lines_parser)
        self.assertIs(s.exception(), exc)
    def test_set_parser_feed_existing(self):
        # Data buffered before attaching a parser is fed to it immediately.
        stream = parsers.StreamParser(loop=self.loop)
        stream.feed_data(b'line1')
        stream.feed_data(b'\r\nline2\r\ndata')
        s = stream.set_parser(self.lines_parser)
        self.assertEqual(
            [(bytearray(b'line1\r\n'), 7), (bytearray(b'line2\r\n'), 7)],
            list(s._buffer))
        self.assertEqual(b'data', bytes(stream._buffer))
        self.assertIsNotNone(stream._parser)
        stream.unset_parser()
        self.assertIsNone(stream._parser)
        self.assertEqual(b'data', bytes(stream._buffer))
        self.assertTrue(s._eof)
    def test_set_parser_feed_existing_exc(self):
        def p(out, buf):
            yield from buf.read(1)
            raise ValueError()
        stream = parsers.StreamParser(loop=self.loop)
        stream.feed_data(b'line1')
        s = stream.set_parser(p)
        self.assertIsInstance(s.exception(), ValueError)
    def test_set_parser_feed_existing_eof(self):
        stream = parsers.StreamParser(loop=self.loop)
        stream.feed_data(b'line1')
        stream.feed_data(b'\r\nline2\r\ndata')
        stream.feed_eof()
        s = stream.set_parser(self.lines_parser)
        self.assertEqual(
            [(bytearray(b'line1\r\n'), 7), (bytearray(b'line2\r\n'), 7)],
            list(s._buffer))
        self.assertEqual(b'data', bytes(stream._buffer))
        self.assertIsNone(stream._parser)
    def test_set_parser_feed_existing_eof_exc(self):
        # A parser that raises on EofStream surfaces the error on the queue.
        def p(out, buf):
            try:
                while True:
                    yield  # read chunk
            except parsers.EofStream:
                raise ValueError()
        stream = parsers.StreamParser(loop=self.loop)
        stream.feed_data(b'line1')
        stream.feed_eof()
        s = stream.set_parser(p)
        self.assertIsInstance(s.exception(), ValueError)
    def test_set_parser_feed_existing_eof_unhandled_eof(self):
        # A parser that never handles EofStream yields a RuntimeError.
        def p(out, buf):
            while True:
                yield  # read chunk
        stream = parsers.StreamParser(loop=self.loop)
        stream.feed_data(b'line1')
        stream.feed_eof()
        s = stream.set_parser(p)
        self.assertFalse(s.is_eof())
        self.assertIsInstance(s.exception(), RuntimeError)
    def test_set_parser_unset(self):
        stream = parsers.StreamParser(loop=self.loop)
        s = stream.set_parser(self.lines_parser)
        stream.feed_data(b'line1\r\nline2\r\n')
        self.assertEqual(
            [(bytearray(b'line1\r\n'), 7), (bytearray(b'line2\r\n'), 7)],
            list(s._buffer))
        self.assertEqual(b'', bytes(stream._buffer))
        stream.unset_parser()
        self.assertTrue(s._eof)
        self.assertEqual(b'', bytes(stream._buffer))
    def test_set_parser_feed_existing_stop(self):
        # A parser that finishes on its own detaches itself from the stream.
        def LinesParser(out, buf):
            try:
                chunk = yield from buf.readuntil(b'\n')
                out.feed_data(chunk, len(chunk))
                chunk = yield from buf.readuntil(b'\n')
                out.feed_data(chunk, len(chunk))
            finally:
                out.feed_eof()
        stream = parsers.StreamParser(loop=self.loop)
        stream.feed_data(b'line1')
        stream.feed_data(b'\r\nline2\r\ndata')
        s = stream.set_parser(LinesParser)
        self.assertEqual(
            b'line1\r\nline2\r\n', b''.join(d for d, _ in s._buffer))
        self.assertEqual(b'data', bytes(stream._buffer))
        self.assertIsNone(stream._parser)
        self.assertTrue(s._eof)
    def test_feed_parser(self):
        stream = parsers.StreamParser(loop=self.loop)
        s = stream.set_parser(self.lines_parser)
        stream.feed_data(b'line1')
        stream.feed_data(b'\r\nline2\r\ndata')
        self.assertEqual(b'data', bytes(stream._buffer))
        stream.feed_eof()
        self.assertEqual(
            [(bytearray(b'line1\r\n'), 7), (bytearray(b'line2\r\n'), 7)],
            list(s._buffer))
        self.assertEqual(b'data', bytes(stream._buffer))
        self.assertTrue(s.is_eof())
    def test_feed_parser_exc(self):
        def p(out, buf):
            yield  # read chunk
            raise ValueError()
        stream = parsers.StreamParser(loop=self.loop)
        s = stream.set_parser(p)
        stream.feed_data(b'line1')
        self.assertIsInstance(s.exception(), ValueError)
        self.assertEqual(b'', bytes(stream._buffer))
    def test_feed_parser_stop(self):
        def p(out, buf):
            yield  # chunk
        stream = parsers.StreamParser(loop=self.loop)
        stream.set_parser(p)
        stream.feed_data(b'line1')
        self.assertIsNone(stream._parser)
        self.assertEqual(b'', bytes(stream._buffer))
    def test_feed_eof_exc(self):
        def p(out, buf):
            try:
                while True:
                    yield  # read chunk
            except parsers.EofStream:
                raise ValueError()
        stream = parsers.StreamParser(loop=self.loop)
        s = stream.set_parser(p)
        stream.feed_data(b'line1')
        self.assertIsNone(s.exception())
        stream.feed_eof()
        self.assertIsInstance(s.exception(), ValueError)
    def test_feed_eof_stop(self):
        def p(out, buf):
            try:
                while True:
                    yield  # read chunk
            except parsers.EofStream:
                out.feed_eof()
        stream = parsers.StreamParser(loop=self.loop)
        s = stream.set_parser(p)
        stream.feed_data(b'line1')
        stream.feed_eof()
        self.assertTrue(s._eof)
    def test_feed_eof_unhandled_eof(self):
        def p(out, buf):
            while True:
                yield  # read chunk
        stream = parsers.StreamParser(loop=self.loop)
        s = stream.set_parser(p)
        stream.feed_data(b'line1')
        stream.feed_eof()
        self.assertFalse(s.is_eof())
        self.assertIsInstance(s.exception(), RuntimeError)
    def test_feed_parser2(self):
        stream = parsers.StreamParser(loop=self.loop)
        s = stream.set_parser(self.lines_parser)
        stream.feed_data(b'line1\r\nline2\r\n')
        stream.feed_eof()
        self.assertEqual(
            [(bytearray(b'line1\r\n'), 7), (bytearray(b'line2\r\n'), 7)],
            list(s._buffer))
        self.assertEqual(b'', bytes(stream._buffer))
        self.assertTrue(s._eof)
    def test_unset_parser_eof_exc(self):
        # unset_parser() throws EofStream into the parser generator.
        def p(out, buf):
            try:
                while True:
                    yield  # read chunk
            except parsers.EofStream:
                raise ValueError()
        stream = parsers.StreamParser(loop=self.loop)
        s = stream.set_parser(p)
        stream.feed_data(b'line1')
        stream.unset_parser()
        self.assertIsInstance(s.exception(), ValueError)
        self.assertIsNone(stream._parser)
    def test_unset_parser_eof_unhandled_eof(self):
        def p(out, buf):
            while True:
                yield  # read chunk
        stream = parsers.StreamParser(loop=self.loop)
        s = stream.set_parser(p)
        stream.feed_data(b'line1')
        stream.unset_parser()
        self.assertIsInstance(s.exception(), RuntimeError)
        self.assertFalse(s.is_eof())
    def test_unset_parser_stop(self):
        def p(out, buf):
            try:
                while True:
                    yield  # read chunk
            except parsers.EofStream:
                out.feed_eof()
        stream = parsers.StreamParser(loop=self.loop)
        s = stream.set_parser(p)
        stream.feed_data(b'line1')
        stream.unset_parser()
        self.assertTrue(s._eof)
    def test_eof_exc(self):
        # A custom eof_exc_class is thrown into parsers instead of EofStream.
        def p(out, buf):
            while True:
                yield  # read chunk
        class CustomEofErr(Exception):
            pass
        stream = parsers.StreamParser(
            eof_exc_class=CustomEofErr, loop=self.loop)
        s = stream.set_parser(p)
        stream.feed_eof()
        self.assertIsInstance(s.exception(), CustomEofErr)
class TestStreamProtocol(unittest.TestCase):
    """Tests for parsers.StreamProtocol: transport wiring and the
    connection_made / connection_lost / data_received lifecycle."""
    def setUp(self):
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(None)
    def tearDown(self):
        self.loop.close()
    def test_connection_made(self):
        tr = unittest.mock.Mock()
        proto = parsers.StreamProtocol(loop=self.loop)
        self.assertIsNone(proto.transport)
        proto.connection_made(tr)
        self.assertIs(proto.transport, tr)
    def test_connection_lost(self):
        # A clean close (exc is None) drops transport/writer and EOFs reader.
        proto = parsers.StreamProtocol(loop=self.loop)
        proto.connection_made(unittest.mock.Mock())
        proto.connection_lost(None)
        self.assertIsNone(proto.transport)
        self.assertIsNone(proto.writer)
        self.assertTrue(proto.reader._eof)
    def test_connection_lost_exc(self):
        # An abnormal close propagates the exception to the reader.
        proto = parsers.StreamProtocol(loop=self.loop)
        proto.connection_made(unittest.mock.Mock())
        exc = ValueError()
        proto.connection_lost(exc)
        self.assertIs(proto.reader.exception(), exc)
    def test_data_received(self):
        proto = parsers.StreamProtocol(loop=self.loop)
        proto.connection_made(unittest.mock.Mock())
        proto.reader = unittest.mock.Mock()
        proto.data_received(b'data')
        proto.reader.feed_data.assert_called_with(b'data')
class TestParserBuffer(unittest.TestCase):
    """Tests for parsers.ParserBuffer: the generator-based read/wait/skip
    primitives and their exception propagation."""
    def setUp(self):
        self.stream = unittest.mock.Mock()
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(None)
    def tearDown(self):
        self.loop.close()
    def _make_one(self):
        # Helper: a fresh empty buffer for each test.
        return parsers.ParserBuffer()
    def test_feed_data(self):
        buf = self._make_one()
        buf.feed_data(b'')
        self.assertEqual(len(buf), 0)
        buf.feed_data(b'data')
        self.assertEqual(len(buf), 4)
        self.assertEqual(bytes(buf), b'data')
    def test_feed_data_after_exception(self):
        # Once an exception is set, further feeds are ignored.
        buf = self._make_one()
        buf.feed_data(b'data')
        exc = ValueError()
        buf.set_exception(exc)
        buf.feed_data(b'more')
        self.assertEqual(len(buf), 4)
        self.assertEqual(bytes(buf), b'data')
    def test_read_exc(self):
        buf = self._make_one()
        p = buf.read(3)
        next(p)
        p.send(b'1')
        exc = ValueError()
        buf.set_exception(exc)
        self.assertIs(buf.exception(), exc)
        self.assertRaises(ValueError, p.send, b'1')
    def test_read_exc_multiple(self):
        # The stored exception also fails generators created afterwards.
        buf = self._make_one()
        p = buf.read(3)
        next(p)
        p.send(b'1')
        exc = ValueError()
        buf.set_exception(exc)
        self.assertIs(buf.exception(), exc)
        p = buf.read(3)
        self.assertRaises(ValueError, next, p)
    def test_read(self):
        # read(n) consumes exactly n bytes and leaves the rest buffered.
        buf = self._make_one()
        p = buf.read(3)
        next(p)
        p.send(b'1')
        try:
            p.send(b'234')
        except StopIteration as exc:
            res = exc.value
        self.assertEqual(res, b'123')
        self.assertEqual(b'4', bytes(buf))
    def test_readsome(self):
        # readsome(n) returns up to n bytes as soon as any are available.
        buf = self._make_one()
        p = buf.readsome(3)
        next(p)
        try:
            p.send(b'1')
        except StopIteration as exc:
            res = exc.value
        self.assertEqual(res, b'1')
        p = buf.readsome(2)
        next(p)
        try:
            p.send(b'234')
        except StopIteration as exc:
            res = exc.value
        self.assertEqual(res, b'23')
        self.assertEqual(b'4', bytes(buf))
    def test_readsome_exc(self):
        buf = self._make_one()
        buf.set_exception(ValueError())
        p = buf.readsome(3)
        self.assertRaises(ValueError, next, p)
    def test_wait(self):
        # wait(n) returns the first n bytes without consuming them.
        buf = self._make_one()
        p = buf.wait(3)
        next(p)
        p.send(b'1')
        try:
            p.send(b'234')
        except StopIteration as exc:
            res = exc.value
        self.assertEqual(res, b'123')
        self.assertEqual(b'1234', bytes(buf))
    def test_wait_exc(self):
        buf = self._make_one()
        buf.set_exception(ValueError())
        p = buf.wait(3)
        self.assertRaises(ValueError, next, p)
    def test_skip(self):
        # skip(n) discards n bytes and returns None.
        buf = self._make_one()
        p = buf.skip(3)
        next(p)
        p.send(b'1')
        try:
            p.send(b'234')
        except StopIteration as exc:
            res = exc.value
        self.assertIsNone(res)
        self.assertEqual(b'4', bytes(buf))
    def test_skip_exc(self):
        buf = self._make_one()
        buf.set_exception(ValueError())
        p = buf.skip(3)
        self.assertRaises(ValueError, next, p)
    def test_readuntil_limit(self):
        # Exceeding the line-length limit raises, whether the overflow comes
        # in small pieces or one oversized chunk.
        buf = self._make_one()
        p = buf.readuntil(b'\n', 4)
        next(p)
        p.send(b'1')
        p.send(b'234')
        self.assertRaises(errors.LineLimitExceededParserError, p.send, b'5')
        buf = parsers.ParserBuffer()
        p = buf.readuntil(b'\n', 4)
        next(p)
        self.assertRaises(
            errors.LineLimitExceededParserError, p.send, b'12345\n6')
        # NOTE(review): the block below duplicates the one above verbatim —
        # looks like a copy-paste leftover.
        buf = parsers.ParserBuffer()
        p = buf.readuntil(b'\n', 4)
        next(p)
        self.assertRaises(
            errors.LineLimitExceededParserError, p.send, b'12345\n6')
    def test_readuntil(self):
        buf = self._make_one()
        p = buf.readuntil(b'\n', 4)
        next(p)
        p.send(b'123')
        try:
            p.send(b'\n456')
        except StopIteration as exc:
            res = exc.value
        self.assertEqual(res, b'123\n')
        self.assertEqual(b'456', bytes(buf))
    def test_readuntil_exc(self):
        buf = self._make_one()
        buf.set_exception(ValueError())
        p = buf.readuntil(b'\n', 4)
        self.assertRaises(ValueError, next, p)
    def test_waituntil_limit(self):
        buf = self._make_one()
        p = buf.waituntil(b'\n', 4)
        next(p)
        p.send(b'1')
        p.send(b'234')
        self.assertRaises(errors.LineLimitExceededParserError, p.send, b'5')
        buf = parsers.ParserBuffer()
        p = buf.waituntil(b'\n', 4)
        next(p)
        self.assertRaises(
            errors.LineLimitExceededParserError, p.send, b'12345\n6')
        # NOTE(review): duplicated block, same as in test_readuntil_limit.
        buf = parsers.ParserBuffer()
        p = buf.waituntil(b'\n', 4)
        next(p)
        self.assertRaises(
            errors.LineLimitExceededParserError, p.send, b'12345\n6')
    def test_waituntil(self):
        # waituntil returns the line but leaves everything buffered.
        buf = self._make_one()
        p = buf.waituntil(b'\n', 4)
        next(p)
        p.send(b'123')
        try:
            p.send(b'\n456')
        except StopIteration as exc:
            res = exc.value
        self.assertEqual(res, b'123\n')
        self.assertEqual(b'123\n456', bytes(buf))
    def test_waituntil_exc(self):
        buf = self._make_one()
        buf.set_exception(ValueError())
        p = buf.waituntil(b'\n', 4)
        self.assertRaises(ValueError, next, p)
    def test_skipuntil(self):
        buf = self._make_one()
        p = buf.skipuntil(b'\n')
        next(p)
        p.send(b'123')
        try:
            p.send(b'\n456\n')
        except StopIteration:
            pass
        self.assertEqual(b'456\n', bytes(buf))
        p = buf.skipuntil(b'\n')
        try:
            next(p)
        except StopIteration:
            pass
        self.assertEqual(b'', bytes(buf))
    def test_skipuntil_exc(self):
        buf = self._make_one()
        buf.set_exception(ValueError())
        p = buf.skipuntil(b'\n')
        self.assertRaises(ValueError, next, p)
    def test_lines_parser(self):
        # LinesParser splits arbitrary chunk boundaries into full lines.
        out = parsers.FlowControlDataQueue(self.stream, loop=self.loop)
        buf = self._make_one()
        p = parsers.LinesParser()(out, buf)
        next(p)
        for d in (b'line1', b'\r\n', b'lin', b'e2\r', b'\ndata'):
            p.send(d)
        self.assertEqual(
            [(bytearray(b'line1\r\n'), 7), (bytearray(b'line2\r\n'), 7)],
            list(out._buffer))
        try:
            p.throw(parsers.EofStream())
        except StopIteration:
            pass
        self.assertEqual(bytes(buf), b'data')
    def test_chunks_parser(self):
        # ChunksParser regroups the byte stream into fixed-size chunks.
        out = parsers.FlowControlDataQueue(self.stream, loop=self.loop)
        buf = self._make_one()
        p = parsers.ChunksParser(5)(out, buf)
        next(p)
        for d in (b'line1', b'lin', b'e2d', b'ata'):
            p.send(d)
        self.assertEqual(
            [(bytearray(b'line1'), 5), (bytearray(b'line2'), 5)],
            list(out._buffer))
        try:
            p.throw(parsers.EofStream())
        except StopIteration:
            pass
        self.assertEqual(bytes(buf), b'data')
| |
import json
import os
from twisted.internet import reactor
from scrapy.crawler import CrawlerRunner
from scrapy.utils.project import get_project_settings
from scrapy.http import Request
from scrapy.item import BaseItem
from scrapy.utils.spider import iterate_spider_output
from gerapy import get_logger
from gerapy.server.core.utils import process_request, process_response, process_item
logger = get_logger(__name__)
class SpiderParser():
    """
    Spider parser for debugging of one step.

    Feeds a single request through one spider callback and records the
    extracted items, the follow-up requests and the processed response.
    """
    # fallback callback name used when the request specifies none
    default_callback = 'parse'

    def __init__(self, settings, spider, args):
        """
        init parser
        :param settings: scrapy project settings
        :param spider: name of the spider to load
        :param args: parse options (url, callback, meta, method, body, ...)
        """
        self.args = args
        self.spider = spider
        # result containers are per-instance on purpose: the original
        # class-level lists were shared by all SpiderParser objects and
        # kept accumulating results across runs
        self.items = []
        self.requests = []
        self.response = None
        self.crawler_process = CrawlerRunner(settings)
        self.spider_loader = self.crawler_process.spider_loader
        self.spidercls = self.spider_loader.load(self.spider)

    def get_callback(self, request):
        """
        get callback from obj or rules
        :param request: request whose URL is matched against the spider rules
        :return: matching rule callback, or the default callback
        """
        if getattr(self.spidercls, 'rules', None):
            rules = self.spidercls.rules
            for rule in rules:
                if rule.link_extractor.matches(request.url):
                    return rule.callback
        return self.default_callback

    def run_callback(self, response, cb):
        """
        run callback and get items and requests
        :param response: response handed to the callback
        :param cb: callable spider callback
        :return: tuple (items, requests) yielded by the callback
        """
        items, requests = [], []
        for x in iterate_spider_output(cb(response)):
            if isinstance(x, (BaseItem, dict)):
                items.append(x)
            elif isinstance(x, Request):
                requests.append(x)
        return items, requests

    def prepare_request(self, spider, request, args):
        """
        configure *request* (meta, method, body, headers, cookies, ...)
        and attach a wrapping callback that records the spider output
        :param spider: spider instance
        :param request: request to configure
        :param args: parse options
        :return: the configured request
        """
        def callback(response):
            """
            this callback wraps the true request callback to capture follows
            :param response:
            :return:
            """
            # if no callback, use default parse callback of CrawlSpider
            cb = self.args.callback or self.default_callback
            # change un-callable callback to callable callback
            if not callable(cb):
                cb_method = getattr(spider, cb, None)
                if callable(cb_method):
                    cb = cb_method
            # run the true callback to get items and follow-up requests
            items, requests = self.run_callback(response, cb)
            # resolve each follow-up request's callback from the rules
            for follow in requests:
                follow.callback = self.get_callback(follow)
                follow.meta['callback'] = follow.callback
            # process items, requests and response into serializable form
            self.items += [process_item(item) for item in items]
            self.requests += [process_request(follow) for follow in requests]
            self.response = process_response(response)

        # update meta
        if args.meta:
            request.meta.update(args.meta)
        # update method
        request.method = args.method if args.method else request.method
        # update request body for post or other methods
        if request.method.lower() != 'get':
            # to be detailed, temp defined
            if isinstance(args.body, dict):
                request = request.replace(body=json.dumps(args.body))
            else:
                request = request.replace(body=args.body)
        # update headers
        request.headers = args.headers if args.headers else request.headers
        # update cookies
        request.cookies = args.cookies if args.cookies else request.cookies
        # update dont_filter
        request.dont_filter = args.filter if hasattr(
            args, 'filter') else request.dont_filter
        # update priority
        request.priority = int(args.priority) if hasattr(
            args, 'priority') else request.priority
        # install the wrapping callback defined above
        request.callback = callback
        return request

    def run(self):
        """
        run main
        :return: dict with 'items', 'requests', 'response' and an 'ok' flag
        """
        request = Request(self.args.url, callback=None)

        def start_requests(spider):
            return [self.prepare_request(spider, request, self.args)]

        self.spidercls.start_requests = start_requests
        self.crawler_process.crawl(self.spidercls)
        if not len(self.crawler_process.crawlers) > 0:
            return {'ok': False}
        # init pcrawler
        self.pcrawler = list(self.crawler_process.crawlers)[0]
        d = self.crawler_process.join()
        # stop the reactor once the crawl finishes (success or failure)
        d.addBoth(lambda _: reactor.stop())
        reactor.run()
        return {
            'items': self.items,
            'requests': self.requests,
            'response': self.response,
            'ok': True
        }
def get_follow_requests_and_items(project_path, spider_name, args):
    """
    get follows
    :param project_path: scrapy project directory
    :param spider_name: spider to run
    :param args: parse options
    :return: result dict produced by SpiderParser.run()
    """
    original_cwd = os.getcwd()
    try:
        # the scrapy project must be the working directory while parsing
        os.chdir(project_path)
        settings = get_project_settings()
        return SpiderParser(settings, spider_name, args).run()
    finally:
        os.chdir(original_cwd)
def get_start_requests(project_path, spider_name):
    """
    get start requests
    :param project_path: project path
    :param spider_name: spider name
    :return: dict with 'finished' flag and processed start requests
    """
    previous_cwd = os.getcwd()
    try:
        # settings are resolved relative to the project directory
        os.chdir(project_path)
        project_settings = get_project_settings()
        runner = CrawlerRunner(settings=project_settings)
        # schedule the spider so a crawler (and spider instance) exists
        runner.crawl(runner.spider_loader.load(spider_name))
        crawler = list(runner.crawlers)[0]
        spider = crawler.spider
        start_requests = list(spider.start_requests())
        # fall back to the newer 'start' hook if start_requests is empty
        if not start_requests and hasattr(spider, 'start'):
            start_requests = list(spider.start())
        processed = [process_request(req) for req in start_requests]
        return {'finished': True, 'requests': processed}
    finally:
        os.chdir(previous_cwd)
| |
"""
Copyright 2015-2016 @_rc0r <hlt99@blinkenshell.org>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
try:
import simplejson as json
except ImportError:
import json
import os
import sys
import socket
import twitter
from urllib.error import URLError
import afl_utils
from afl_utils.AflPrettyPrint import clr, print_ok, print_warn, print_err
from db_connectors import con_sqlite
# Column spec for the django-compatible `aflutils_fuzzerstats` table used
# by dump_stats(): one row per fuzzer_stats snapshot, keyed in practice by
# (last_update, afl_banner).
db_table_spec = """`id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, `last_update` INTEGER NOT NULL, `start_time`INTEGER NOT NULL,
`fuzzer_pid` INTEGER NOT NULL, `cycles_done` INTEGER NOT NULL, `execs_done` INTEGER NOT NULL,
`execs_per_sec` REAL NOT NULL, `paths_total` INTEGER NOT NULL, `paths_favored` INTEGER NOT NULL,
`paths_found` INTEGER NOT NULL, `paths_imported` INTEGER NOT NULL, `max_depth` INTEGER NOT NULL,
`cur_path` INTEGER NOT NULL, `pending_favs` INTEGER NOT NULL, `pending_total` INTEGER NOT NULL,
`variable_paths` INTEGER NOT NULL, `stability` REAL, `bitmap_cvg` REAL NOT NULL,
`unique_crashes` INTEGER NOT NULL, `unique_hangs` INTEGER NOT NULL, `last_path` INTEGER NOT NULL,
`last_crash` INTEGER NOT NULL, `last_hang` INTEGER NOT NULL, `execs_since_crash` INTEGER NOT NULL,
`exec_timeout` INTEGER NOT NULL, `afl_banner` VARCHAR(200) NOT NULL, `afl_version` VARCHAR(10) NOT NULL,
`command_line` VARCHAR(1000)"""
def show_info():
    """Print the afl-stats banner line and a short description."""
    banner = (clr.CYA + "afl-stats " + clr.BRI + "%s" % afl_utils.__version__
              + clr.RST + " by %s" % afl_utils.__author__)
    print(banner)
    print("Send stats of afl-fuzz jobs to Twitter.")
    print("")
def read_config(config_file):
    """Load the JSON config file; abort the program if it is missing."""
    path = os.path.abspath(os.path.expanduser(config_file))
    if not os.path.isfile(path):
        print_err("Config file not found!")
        sys.exit(1)
    with open(path, 'r') as handle:
        return json.load(handle)
def twitter_init(config):
    """Authenticate against Twitter with the credentials referenced in
    *config* and return a twitter.Twitter instance; exits on network error.

    Runs the interactive OAuth dance on first use (when the creds file
    does not exist yet)."""
    try:
        config['twitter_creds_file'] = os.path.abspath(os.path.expanduser(config['twitter_creds_file']))
        if not os.path.exists(config['twitter_creds_file']):
            # first run: interactive OAuth dance writes the creds file
            twitter.oauth_dance("fuzzer_stats", config['twitter_consumer_key'],
                                config['twitter_consumer_secret'], config['twitter_creds_file'])
        oauth_token, oauth_secret = twitter.read_token_file(config['twitter_creds_file'])
        twitter_instance = twitter.Twitter(auth=twitter.OAuth(oauth_token, oauth_secret,
                                                              config['twitter_consumer_key'],
                                                              config['twitter_consumer_secret']))
        return twitter_instance
    except (twitter.TwitterHTTPError, URLError):
        print_err("Network error, twitter login failed! Check your connection!")
        sys.exit(1)
def shorten_tweet(tweet):
    """Trim a status message to Twitter's 140-character limit."""
    if len(tweet) <= 140:
        return tweet
    print_ok("Status too long, will be shortened to 140 chars!")
    # 137 visible chars + ellipsis keeps the result at exactly 140
    return tweet[:137] + "..."
def fuzzer_alive(pid):
    """Return 1 if a process with *pid* exists, else 0 (signal-0 probe)."""
    try:
        os.kill(pid, 0)
        return 1
    except (OSError, ProcessLookupError):
        return 0
def parse_stat_file(stat_file, summary=True):
    """Parse an afl-fuzz ``fuzzer_stats`` file.

    :param stat_file: path to the fuzzer_stats file
    :param summary:   if True return only the summary subset of stats,
                      otherwise the complete stat set
    :return: dict of stats (keys missing from the file keep their
             default), or None if the file does not exist
    """
    summary_stats = {
        'fuzzer_pid': None,
        'execs_done': None,
        'execs_per_sec': None,
        'paths_total': None,
        'paths_favored': None,
        'pending_favs': None,
        'pending_total': None,
        'unique_crashes': None,
        'unique_hangs': None,
        'afl_banner': None
    }
    complete_stats = {
        'last_update': '',
        'start_time': '',
        'fuzzer_pid': '',
        'cycles_done': '',
        'execs_done': '',
        'execs_per_sec': '',
        'paths_total': '',
        'paths_favored': '',
        'paths_found': '',
        'paths_imported': '',
        'max_depth': '',
        'cur_path': '',
        'pending_favs': '',
        'pending_total': '',
        'variable_paths': '',
        'stability': '',
        'bitmap_cvg': '',
        'unique_crashes': '',
        'unique_hangs': '',
        'last_path': '',
        'last_crash': '',
        'last_hang': '',
        'execs_since_crash': '',
        'exec_timeout': '',
        'afl_banner': '',
        'afl_version': '',
        'command_line': ''
    }
    # BUGFIX: pick the result dict once, up front. The original selected it
    # inside the line loop, so an empty file raised NameError on 'stats'.
    stats = summary_stats if summary else complete_stats
    try:
        # BUGFIX: 'with' guarantees the file handle is closed even if
        # reading raises (the original used open/readlines/close).
        with open(stat_file, "r") as f:
            lines = f.readlines()
    except FileNotFoundError as e:
        print_warn("Stat file " + clr.GRA + "%s" % e.filename + clr.RST + " not found!")
        return None
    for l in lines:
        # NOTE: l[19:] assumes afl's fixed-width "key : value" layout
        # (key field padded to 19 columns) — unchanged from the original
        if summary:
            for k in stats.keys():
                if k != "fuzzer_pid":
                    if k in l:
                        stats[k] = l[19:].strip(": \r\n")
                else:
                    if k in l:
                        # store liveness (0/1) instead of the raw pid
                        stats[k] = fuzzer_alive(int(l[19:].strip(": \r\n")))
        else:
            for k in stats.keys():
                if k in l:
                    stats[k] = l[19:].strip(": %\r\n")
    return stats
def load_stats(fuzzer_dir, summary=True):
    """Collect stats from a single afl job dir or a sync dir of jobs."""
    fuzzer_dir = os.path.abspath(os.path.expanduser(fuzzer_dir))
    if not os.path.isdir(fuzzer_dir):
        print_warn("Invalid fuzzing directory specified: " + clr.GRA + "%s" % fuzzer_dir + clr.RST)
        return None
    single_stat_file = os.path.join(fuzzer_dir, "fuzzer_stats")
    if os.path.isfile(single_stat_file):
        # single afl-fuzz job
        stat_files = [single_stat_file]
    else:
        # sync dir: one fuzzer_stats per instance subdirectory
        stat_files = [
            os.path.join(fuzzer_dir, sub, "fuzzer_stats")
            for sub in os.listdir(fuzzer_dir)
            if os.path.isdir(os.path.join(fuzzer_dir, sub))
        ]
    parsed = [parse_stat_file(sf, summary) for sf in stat_files]
    return [stats for stats in parsed if stats]
def summarize_stats(stats):
    """Aggregate a list of per-fuzzer summary dicts into one record."""
    summary = {
        'fuzzers': len(stats),
        'fuzzer_pid': 0,
        'execs_done': 0,
        'execs_per_sec': 0,
        'paths_total': 0,
        'paths_favored': 0,
        'pending_favs': 0,
        'pending_total': 0,
        'unique_crashes': 0,
        'unique_hangs': 0,
        'afl_banner': 0,
        'host': socket.gethostname()[:10]
    }
    for stat in stats:
        for key in summary:
            if key not in stat:
                continue
            if key == "afl_banner":
                # keep the (last) banner, truncated for tweet length
                summary[key] = stat[key][:10]
            else:
                summary[key] += float(stat[key])
    return summary
def diff_stats(sum_stats, old_stats):
    """Per-key difference between the current and previous summary."""
    # a key-count mismatch indicates a stale/corrupt on-disk snapshot
    if len(sum_stats) != len(old_stats):
        print_warn("Stats corrupted for '" + clr.GRA + "%s" % sum_stats['afl_banner'] + clr.RST + "'!")
        return None
    diff = {
        'fuzzers': len(sum_stats),
        'fuzzer_pid': 0,
        'execs_done': 0,
        'execs_per_sec': 0,
        'paths_total': 0,
        'paths_favored': 0,
        'pending_favs': 0,
        'pending_total': 0,
        'unique_crashes': 0,
        'unique_hangs': 0,
        'afl_banner': 0,
        'host': socket.gethostname()[:10]
    }
    for key in sum_stats:
        if key in ('afl_banner', 'host'):
            # non-numeric fields are carried over unchanged
            diff[key] = sum_stats[key]
        else:
            diff[key] = sum_stats[key] - old_stats[key]
    return diff
def prettify_stat(stat, dstat, console=True):
    """Format a summary (*stat*) plus its diff (*dstat*) into a human
    readable multi-line string: colorized for the console when *console*
    is True, plain (tweet-sized) otherwise."""
    # work on copies: execs_done is rescaled to millions below
    _stat = stat.copy()
    _dstat = dstat.copy()
    _stat['execs_done'] /= 1e6
    _dstat['execs_done'] /= 1e6
    # build " (+x/+y)"-style diff suffixes; empty when nothing changed
    if _dstat['fuzzer_pid'] == _dstat['fuzzers'] == 0:
        ds_alive = ""
    else:
        ds_alive = " (%+d/%+d)" % (_dstat['fuzzer_pid'], _dstat['fuzzers'])
    # if int(_dstat['execs_done']) == 0:
    if _dstat['execs_done'] == 0:
        ds_exec = " "
    else:
        ds_exec = " (%+d) " % _dstat['execs_done']
    if _dstat['execs_per_sec'] == 0:
        ds_speed = " "
    else:
        ds_speed = " (%+1.f) " % _dstat['execs_per_sec']
    if _dstat['pending_total'] == _dstat['pending_favs'] == 0:
        ds_pend = ""
    else:
        ds_pend = " (%+d/%+d)" % (_dstat['pending_total'], _dstat['pending_favs'])
    if _dstat['unique_crashes'] == 0:
        ds_crash = ""
    else:
        ds_crash = " (%+d)" % _dstat['unique_crashes']
    if console:
        # colorize stats
        _stat['afl_banner'] = clr.BLU + _stat['afl_banner'] + clr.RST
        _stat['host'] = clr.LBL + _stat['host'] + clr.RST
        lbl = clr.GRA
        # alive-count color: red if nothing alive, green if all, yellow if some
        if _stat['fuzzer_pid'] == 0:
            alc = clr.LRD
            slc = clr.GRA
        else:
            alc = clr.LGN if _stat['fuzzer_pid'] == _stat['fuzzers'] else clr.YEL
            slc = ""
        clc = clr.MGN if _stat['unique_crashes'] == 0 else clr.LRD
        rst = clr.RST
        # colorize diffs (red for regressions, green otherwise)
        if _dstat['fuzzer_pid'] < 0 or _dstat['fuzzers'] < 0:
            ds_alive = clr.RED + ds_alive + clr.RST
        else:
            ds_alive = clr.GRN + ds_alive + clr.RST
        # if int(_dstat['execs_done']) < 0:
        if _dstat['execs_done'] < 0:
            ds_exec = clr.RED + ds_exec + clr.RST
        else:
            ds_exec = clr.GRN + ds_exec + clr.RST
        if _dstat['execs_per_sec'] < 0:
            ds_speed = clr.RED + ds_speed + clr.RST
        else:
            ds_speed = clr.GRN + ds_speed + clr.RST
        if _dstat['unique_crashes'] < 0:
            ds_crash = clr.RED + ds_crash + clr.RST
        else:
            ds_crash = clr.GRN + ds_crash + clr.RST
        ds_pend = clr.GRA + ds_pend + clr.RST
        pretty_stat =\
            "[%s on %s]\n %sAlive:%s %s%d/%d%s%s\n %sExecs:%s %d%sm\n %sSpeed:%s %s%.1f%sx/s%s\n %sPend:%s %d/%d%s\n" \
            " %sCrashes:%s %s%d%s%s" % (_stat['afl_banner'], _stat['host'], lbl, rst, alc, _stat['fuzzer_pid'],
                                        _stat['fuzzers'], rst, ds_alive, lbl, rst, _stat['execs_done'], ds_exec, lbl, rst, slc,
                                        _stat['execs_per_sec'], ds_speed, rst, lbl, rst, _stat['pending_total'],
                                        _stat['pending_favs'], ds_pend, lbl, rst, clc, _stat['unique_crashes'], rst, ds_crash)
    else:
        # plain variant used for tweets (no escape codes)
        pretty_stat = "[%s #%s]\nAlive: %d/%d%s\nExecs: %d%sm\nSpeed: %.1f%sx/s\n" \
                      "Pend: %d/%d%s\nCrashes: %d%s" %\
                      (_stat['afl_banner'], _stat['host'], _stat['fuzzer_pid'], _stat['fuzzers'], ds_alive,
                       _stat['execs_done'], ds_exec, _stat['execs_per_sec'], ds_speed,
                       _stat['pending_total'], _stat['pending_favs'], ds_pend, _stat['unique_crashes'], ds_crash)
    return pretty_stat
def dump_stats(config_settings, database):
    """Write the complete stats of every configured fuzz dir to the DB."""
    # django compatible: everything goes into one table (matching the
    # django plots app model). Rows are differentiated by afl_banner, so
    # don't override it manually! afl-multicore creates a unique banner
    # for every fuzzer.
    table = 'aflutils_fuzzerstats'
    for sync_dir in config_settings['fuzz_dirs']:
        for fuzzer in load_stats(sync_dir, summary=False):
            database.init_database(table, db_table_spec)
            # only insert snapshots we have not stored yet
            if not database.dataset_exists(table, fuzzer, ['last_update', 'afl_banner']):
                database.insert_dataset(table, fuzzer)
def fetch_stats(config_settings, twitter_inst):
    """For every configured fuzz dir: summarize stats, diff them against
    the previous snapshot (persisted in a hidden JSON file), print the
    result and optionally tweet it."""
    stat_dict = dict()
    for fuzzer in config_settings['fuzz_dirs']:
        stats = load_stats(fuzzer)
        if not stats:
            continue
        sum_stats = summarize_stats(stats)
        try:
            # previous snapshot lives in .afl_stats.<dirname> in the CWD
            with open('.afl_stats.{}'.format(os.path.basename(fuzzer)), 'r') as f:
                old_stats = json.load(f)
        except FileNotFoundError:
            # first run: diff against self, so all deltas are zero
            old_stats = sum_stats.copy()
        # initialize/update stat_dict
        stat_dict[fuzzer] = (sum_stats, old_stats)
        stat_change = diff_stats(sum_stats, old_stats)
        # persist the current summary for the next invocation
        with open('.afl_stats.{}'.format(os.path.basename(fuzzer)), 'w') as f:
            json.dump(sum_stats, f)
        print(prettify_stat(sum_stats, stat_change, True))
        tweet = prettify_stat(sum_stats, stat_change, False)
        l = len(tweet)
        # red length indicator when the tweet will need shortening
        c = clr.LRD if l > 140 else clr.LGN
        if twitter_inst:
            print_ok("Tweeting status (%s%d" % (c, l) + clr.RST + " chars)...")
            try:
                twitter_inst.statuses.update(status=shorten_tweet(tweet))
            except (twitter.TwitterHTTPError, URLError):
                print_warn("Problem connecting to Twitter! Tweet not sent!")
            except Exception as e:
                print_err("Sending tweet failed (Reason: " + clr.GRA + "%s" % e.__cause__ + clr.RST + ")")
def main(argv):
    """Command-line entry point: parse arguments, optionally dump full
    stats into a SQLite database, then print/tweet the summaries."""
    parser = argparse.ArgumentParser(description="Post selected contents of fuzzer_stats to Twitter.",
                                     usage="afl-stats [-h] [-c config] [-d database] [-t]\n")
    parser.add_argument("-c", "--config", dest="config_file",
                        help="afl-stats config file (Default: afl-stats.conf)!", default="afl-stats.conf")
    parser.add_argument("-d", "--database", dest="database_file",
                        help="Dump stats history into database.")
    parser.add_argument('-t', '--twitter', dest='twitter', action='store_const', const=True,
                        help='Post stats to twitter (Default: off).', default=False)
    parser.add_argument('-q', '--quiet', dest='quiet', action='store_const', const=True,
                        help='Suppress any output (Default: off).', default=False)
    args = parser.parse_args(argv[1:])
    if not args.quiet:
        show_info()
    # resolve the optional database path before opening a connector
    if args.database_file:
        db_file = os.path.abspath(os.path.expanduser(args.database_file))
    else:
        db_file = None
    if db_file:
        lite_db = con_sqlite.sqliteConnector(db_file, verbose=False)
    else:
        lite_db = None
    config_settings = read_config(args.config_file)
    # complete stats go to the DB before the summarized console/tweet run
    if lite_db:
        dump_stats(config_settings, lite_db)
        lite_db.commit_close()
    if args.twitter:
        twitter_inst = twitter_init(config_settings)
    else:
        twitter_inst = None
    fetch_stats(config_settings, twitter_inst)
# allow running afl-stats directly as a script
if __name__ == "__main__":
    main(sys.argv)
| |
import mxnet as mx
import numpy as np
from mxnet.executor_manager import _split_input_slice
from rcnn.config import config
from rcnn.io.image import tensor_vstack
from rcnn.io.rpn import get_rpn_testbatch, get_rpn_batch, assign_anchor
from rcnn.io.rcnn import get_rcnn_testbatch, get_rcnn_batch
class TestLoader(mx.io.DataIter):
    """Test-time data iterator (no labels).

    next() yields (im_info, DataBatch). Data names depend on whether the
    network computes proposals itself ('data'+'im_info') or consumes
    precomputed ones ('data'+'rois').
    """

    def __init__(self, roidb, batch_size=1, shuffle=False,
                 has_rpn=False):
        """
        :param roidb: preprocessed roidb
        :param batch_size: images per batch
        :param shuffle: shuffle image order on reset
        :param has_rpn: True if the network contains an RPN
        """
        super(TestLoader, self).__init__()
        # save parameters as properties
        self.roidb = roidb
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.has_rpn = has_rpn
        # infer properties from roidb
        self.size = len(self.roidb)
        self.index = np.arange(self.size)
        # decide data and label names (only for training)
        if has_rpn:
            self.data_name = ['data', 'im_info']
        else:
            self.data_name = ['data', 'rois']
        self.label_name = None
        # status variable for synchronization between get_data and get_label
        self.cur = 0
        self.data = None
        self.label = None
        self.im_info = None
        # get first batch to fill in provide_data and provide_label
        self.reset()
        self.get_batch()

    @property
    def provide_data(self):
        return [(k, v.shape) for k, v in zip(self.data_name, self.data)]

    @property
    def provide_label(self):
        # test loader carries no labels
        return None

    def reset(self):
        self.cur = 0
        if self.shuffle:
            np.random.shuffle(self.index)

    def iter_next(self):
        return self.cur + self.batch_size <= self.size

    def next(self):
        if self.iter_next():
            self.get_batch()
            self.cur += self.batch_size
            return self.im_info, \
                   mx.io.DataBatch(data=self.data, label=self.label,
                                   pad=self.getpad(), index=self.getindex(),
                                   provide_data=self.provide_data, provide_label=self.provide_label)
        else:
            raise StopIteration

    def getindex(self):
        # BUGFIX: floor division — '/' returns a float index on Python 3
        return self.cur // self.batch_size

    def getpad(self):
        if self.cur + self.batch_size > self.size:
            return self.cur + self.batch_size - self.size
        else:
            return 0

    def get_batch(self):
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        if self.has_rpn:
            data, label, im_info = get_rpn_testbatch(roidb)
        else:
            data, label, im_info = get_rcnn_testbatch(roidb)
        # labels returned by the batch helpers are ignored at test time
        self.data = [mx.nd.array(data[name]) for name in self.data_name]
        self.im_info = im_info
class ROIIter(mx.io.DataIter):
    """Training iterator feeding precomputed ROIs to Fast R-CNN."""

    def __init__(self, roidb, batch_size=2, shuffle=False, ctx=None, work_load_list=None, aspect_grouping=False):
        """
        This Iter will provide roi data to Fast R-CNN network
        :param roidb: must be preprocessed
        :param batch_size: must divide BATCH_SIZE(128)
        :param shuffle: bool
        :param ctx: list of contexts
        :param work_load_list: list of work load
        :param aspect_grouping: group images with similar aspects
        :return: ROIIter
        """
        super(ROIIter, self).__init__()
        # save parameters as properties
        self.roidb = roidb
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.ctx = ctx
        if self.ctx is None:
            self.ctx = [mx.cpu()]
        self.work_load_list = work_load_list
        self.aspect_grouping = aspect_grouping
        # infer properties from roidb
        self.size = len(roidb)
        self.index = np.arange(self.size)
        # decide data and label names (only for training)
        self.data_name = ['data', 'rois']
        self.label_name = ['label', 'bbox_target', 'bbox_weight']
        # status variable for synchronization between get_data and get_label
        self.cur = 0
        self.batch = None
        self.data = None
        self.label = None
        # get first batch to fill in provide_data and provide_label
        self.reset()
        self.get_batch()

    @property
    def provide_data(self):
        return [(k, v.shape) for k, v in zip(self.data_name, self.data)]

    @property
    def provide_label(self):
        return [(k, v.shape) for k, v in zip(self.label_name, self.label)]

    def reset(self):
        self.cur = 0
        if self.shuffle:
            if self.aspect_grouping:
                # keep horizontal and vertical images together in a batch
                widths = np.array([r['width'] for r in self.roidb])
                heights = np.array([r['height'] for r in self.roidb])
                horz = (widths >= heights)
                vert = np.logical_not(horz)
                horz_inds = np.where(horz)[0]
                vert_inds = np.where(vert)[0]
                inds = np.hstack((np.random.permutation(horz_inds), np.random.permutation(vert_inds)))
                extra = inds.shape[0] % self.batch_size
                # BUGFIX: original sliced inds[:-extra]; with extra == 0
                # that slice is empty and the row shuffle silently no-ops
                keep = inds.shape[0] - extra
                inds_ = np.reshape(inds[:keep], (-1, self.batch_size))
                row_perm = np.random.permutation(np.arange(inds_.shape[0]))
                inds[:keep] = np.reshape(inds_[row_perm, :], (-1,))
                self.index = inds
            else:
                np.random.shuffle(self.index)

    def iter_next(self):
        return self.cur + self.batch_size <= self.size

    def next(self):
        if self.iter_next():
            self.get_batch()
            self.cur += self.batch_size
            return mx.io.DataBatch(data=self.data, label=self.label,
                                   pad=self.getpad(), index=self.getindex(),
                                   provide_data=self.provide_data, provide_label=self.provide_label)
        else:
            raise StopIteration

    def getindex(self):
        # BUGFIX: floor division — '/' returns a float index on Python 3
        return self.cur // self.batch_size

    def getpad(self):
        if self.cur + self.batch_size > self.size:
            return self.cur + self.batch_size - self.size
        else:
            return 0

    def get_batch(self):
        # slice roidb
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        # decide multi device slices
        work_load_list = self.work_load_list
        ctx = self.ctx
        if work_load_list is None:
            work_load_list = [1] * len(ctx)
        assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
            "Invalid settings for work load. "
        slices = _split_input_slice(self.batch_size, work_load_list)
        # get data for each device
        data_list = []
        label_list = []
        for islice in slices:
            iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
            data, label = get_rcnn_batch(iroidb)
            data_list.append(data)
            label_list.append(label)
        # stack per-device results back into whole-batch tensors
        all_data = dict()
        for key in data_list[0].keys():
            all_data[key] = tensor_vstack([batch[key] for batch in data_list])
        all_label = dict()
        for key in label_list[0].keys():
            all_label[key] = tensor_vstack([batch[key] for batch in label_list])
        self.data = [mx.nd.array(all_data[name]) for name in self.data_name]
        self.label = [mx.nd.array(all_label[name]) for name in self.label_name]
class AnchorLoader(mx.io.DataIter):
    """Training iterator producing RPN anchor targets per image."""

    def __init__(self, feat_sym, roidb, batch_size=1, shuffle=False, ctx=None, work_load_list=None,
                 feat_stride=16, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2), allowed_border=0,
                 aspect_grouping=False):
        """
        This Iter will provide roi data to Fast R-CNN network
        :param feat_sym: to infer shape of assign_output
        :param roidb: must be preprocessed
        :param batch_size: must divide BATCH_SIZE(128)
        :param shuffle: bool
        :param ctx: list of contexts
        :param work_load_list: list of work load
        :param aspect_grouping: group images with similar aspects
        :return: AnchorLoader
        """
        super(AnchorLoader, self).__init__()
        # save parameters as properties
        self.feat_sym = feat_sym
        self.roidb = roidb
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.ctx = ctx
        if self.ctx is None:
            self.ctx = [mx.cpu()]
        self.work_load_list = work_load_list
        self.feat_stride = feat_stride
        self.anchor_scales = anchor_scales
        self.anchor_ratios = anchor_ratios
        self.allowed_border = allowed_border
        self.aspect_grouping = aspect_grouping
        # infer properties from roidb
        self.size = len(roidb)
        self.index = np.arange(self.size)
        # decide data and label names
        if config.TRAIN.END2END:
            self.data_name = ['data', 'im_info', 'gt_boxes']
        else:
            self.data_name = ['data']
        self.label_name = ['label', 'bbox_target', 'bbox_weight']
        # status variable for synchronization between get_data and get_label
        self.cur = 0
        self.batch = None
        self.data = None
        self.label = None
        # get first batch to fill in provide_data and provide_label
        self.reset()
        self.get_batch()

    @property
    def provide_data(self):
        return [(k, v.shape) for k, v in zip(self.data_name, self.data)]

    @property
    def provide_label(self):
        return [(k, v.shape) for k, v in zip(self.label_name, self.label)]

    def reset(self):
        self.cur = 0
        if self.shuffle:
            if self.aspect_grouping:
                # keep horizontal and vertical images together in a batch
                widths = np.array([r['width'] for r in self.roidb])
                heights = np.array([r['height'] for r in self.roidb])
                horz = (widths >= heights)
                vert = np.logical_not(horz)
                horz_inds = np.where(horz)[0]
                vert_inds = np.where(vert)[0]
                inds = np.hstack((np.random.permutation(horz_inds), np.random.permutation(vert_inds)))
                extra = inds.shape[0] % self.batch_size
                # BUGFIX: original sliced inds[:-extra]; with extra == 0
                # that slice is empty and the row shuffle silently no-ops
                keep = inds.shape[0] - extra
                inds_ = np.reshape(inds[:keep], (-1, self.batch_size))
                row_perm = np.random.permutation(np.arange(inds_.shape[0]))
                inds[:keep] = np.reshape(inds_[row_perm, :], (-1,))
                self.index = inds
            else:
                np.random.shuffle(self.index)

    def iter_next(self):
        return self.cur + self.batch_size <= self.size

    def next(self):
        if self.iter_next():
            self.get_batch()
            self.cur += self.batch_size
            return mx.io.DataBatch(data=self.data, label=self.label,
                                   pad=self.getpad(), index=self.getindex(),
                                   provide_data=self.provide_data, provide_label=self.provide_label)
        else:
            raise StopIteration

    def getindex(self):
        # BUGFIX: floor division — '/' returns a float index on Python 3
        return self.cur // self.batch_size

    def getpad(self):
        if self.cur + self.batch_size > self.size:
            return self.cur + self.batch_size - self.size
        else:
            return 0

    def infer_shape(self, max_data_shape=None, max_label_shape=None):
        """ Return maximum data and label shape for single gpu """
        if max_data_shape is None:
            max_data_shape = []
        if max_label_shape is None:
            max_label_shape = []
        max_shapes = dict(max_data_shape + max_label_shape)
        input_batch_size = max_shapes['data'][0]
        # fake im_info covering the whole (max) image at scale 1.0
        im_info = [[max_shapes['data'][2], max_shapes['data'][3], 1.0]]
        _, feat_shape, _ = self.feat_sym.infer_shape(**max_shapes)
        # assign anchors to an empty gt set just to obtain label shapes
        label = assign_anchor(feat_shape[0], np.zeros((0, 5)), im_info,
                              self.feat_stride, self.anchor_scales, self.anchor_ratios, self.allowed_border)
        label = [label[k] for k in self.label_name]
        label_shape = [(k, tuple([input_batch_size] + list(v.shape[1:]))) for k, v in zip(self.label_name, label)]
        return max_data_shape, label_shape

    def get_batch(self):
        # slice roidb
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        # decide multi device slice
        work_load_list = self.work_load_list
        ctx = self.ctx
        if work_load_list is None:
            work_load_list = [1] * len(ctx)
        assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
            "Invalid settings for work load. "
        slices = _split_input_slice(self.batch_size, work_load_list)
        # get testing data for multigpu
        # each element in the list is the data used by different gpu
        data_list = []
        label_list = []
        for islice in slices:
            iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
            data, label = get_rpn_batch(iroidb)
            data_list.append(data)
            label_list.append(label)
        # pad data first and then assign anchor (read label)
        data_tensor = tensor_vstack([batch['data'] for batch in data_list])
        for data, data_pad in zip(data_list, data_tensor):
            data['data'] = data_pad[np.newaxis, :]
        # generate new label data
        new_label_list = []
        for data, label in zip(data_list, label_list):
            # infer label shape (im_info is not a network input here)
            data_shape = {k: v.shape for k, v in data.items()}
            del data_shape['im_info']
            _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
            feat_shape = [int(i) for i in feat_shape[0]]
            # add gt_boxes to data for e2e
            data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
            # assign anchor for label
            label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'],
                                  self.feat_stride, self.anchor_scales,
                                  self.anchor_ratios, self.allowed_border)
            new_label_list.append(label)
        all_data = dict()
        for key in self.data_name:
            all_data[key] = tensor_vstack([batch[key] for batch in data_list])
        all_label = dict()
        for key in self.label_name:
            # 'label' entries are padded with the ignore value -1
            pad = -1 if key == 'label' else 0
            all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)
        self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
        self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
class AnchorLoaderAddcls(mx.io.DataIter):
def __init__(self, feat_sym, roidb, batch_size=1, shuffle=False, ctx=None, work_load_list=None,
             feat_stride=16, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2), allowed_border=0,
             aspect_grouping=False):
    """
    This Iter will provide roi data to Fast R-CNN network, additionally
    exposing per-roi info ('roi_info') and the gt class ('gtlabel').
    :param feat_sym: to infer shape of assign_output
    :param roidb: must be preprocessed
    :param batch_size: must divide BATCH_SIZE(128)
    :param shuffle: bool
    :param ctx: list of contexts
    :param work_load_list: list of work load
    :param aspect_grouping: group images with similar aspects
    :return: AnchorLoaderAddcls
    """
    super(AnchorLoaderAddcls, self).__init__()
    # save parameters as properties
    self.feat_sym = feat_sym
    self.roidb = roidb
    self.batch_size = batch_size
    self.shuffle = shuffle
    self.ctx = ctx
    if self.ctx is None:
        self.ctx = [mx.cpu()]
    self.work_load_list = work_load_list
    self.feat_stride = feat_stride
    self.anchor_scales = anchor_scales
    self.anchor_ratios = anchor_ratios
    self.allowed_border = allowed_border
    self.aspect_grouping = aspect_grouping
    # infer properties from roidb
    self.size = len(roidb)
    self.index = np.arange(self.size)
    # decide data and label names; this variant adds roi_info / gtlabel
    if config.TRAIN.END2END:
        self.data_name = ['data', 'im_info', 'gt_boxes', 'roi_info']
    else:
        self.data_name = ['data', 'roi_info']
    self.label_name = ['label', 'bbox_target', 'bbox_weight', 'gtlabel']
    # status variable for synchronization between get_data and get_label
    self.cur = 0
    self.batch = None
    self.data = None
    self.label = None
    # get first batch to fill in provide_data and provide_label
    self.reset()
    self.get_batch()
@property
def provide_data(self):
    """Name/shape pairs describing the current data batch."""
    return list(zip(self.data_name, (arr.shape for arr in self.data)))
@property
def provide_label(self):
    """Name/shape pairs describing the current label batch."""
    return list(zip(self.label_name, (arr.shape for arr in self.label)))
def reset(self):
    """Restart the epoch; optionally shuffle, grouping by aspect ratio."""
    self.cur = 0
    if self.shuffle:
        if self.aspect_grouping:
            # keep horizontal and vertical images together in a batch
            widths = np.array([r['width'] for r in self.roidb])
            heights = np.array([r['height'] for r in self.roidb])
            horz = (widths >= heights)
            vert = np.logical_not(horz)
            horz_inds = np.where(horz)[0]
            vert_inds = np.where(vert)[0]
            inds = np.hstack((np.random.permutation(horz_inds), np.random.permutation(vert_inds)))
            extra = inds.shape[0] % self.batch_size
            # BUGFIX: the original used inds[:-extra]; when extra == 0
            # that slice is empty and the batch-row shuffle silently no-ops
            keep = inds.shape[0] - extra
            inds_ = np.reshape(inds[:keep], (-1, self.batch_size))
            row_perm = np.random.permutation(np.arange(inds_.shape[0]))
            inds[:keep] = np.reshape(inds_[row_perm, :], (-1,))
            self.index = inds
        else:
            np.random.shuffle(self.index)
def iter_next(self):
    """True while at least one more full batch fits in this epoch."""
    return self.size >= self.cur + self.batch_size
def next(self):
    """Return the next DataBatch, or raise StopIteration at epoch end."""
    if self.iter_next():
        self.get_batch()
        self.cur += self.batch_size
        return mx.io.DataBatch(data=self.data, label=self.label,
                               pad=self.getpad(), index=self.getindex(),
                               provide_data=self.provide_data, provide_label=self.provide_label)
    else:
        raise StopIteration
def getindex(self):
    """Return the current batch index as an integer."""
    # BUGFIX: floor division — '/' returns a float index on Python 3
    return self.cur // self.batch_size
def getpad(self):
    """Number of padded samples in the final (partial) batch."""
    overshoot = self.cur + self.batch_size - self.size
    return overshoot if overshoot > 0 else 0
def infer_shape(self, max_data_shape=None, max_label_shape=None):
    """ Return maximum data and label shape for single gpu """
    if max_data_shape is None:
        max_data_shape = []
    if max_label_shape is None:
        max_label_shape = []
    max_shapes = dict(max_data_shape + max_label_shape)
    input_batch_size = max_shapes['data'][0]
    # fake im_info covering the whole (max) image at scale 1.0
    im_info = [[max_shapes['data'][2], max_shapes['data'][3], 1.0]]
    _, feat_shape, _ = self.feat_sym.infer_shape(**max_shapes)
    # assign anchors to an empty gt set just to obtain the label shapes
    label = assign_anchor(feat_shape[0], np.zeros((0, 5)), im_info,
                          self.feat_stride, self.anchor_scales, self.anchor_ratios, self.allowed_border)
    # placeholder so 'gtlabel' exists when label names are zipped below
    label['gtlabel'] = np.empty(1)
    label = [label[k] for k in self.label_name]
    label_shape = [(k, tuple([input_batch_size] + list(v.shape[1:]))) for k, v in zip(self.label_name, label)]
    return max_data_shape, label_shape
    def get_batch(self):
        """Assemble the current mini-batch.

        Slices the roidb at the cursor, splits it across devices, pads
        images to a common shape, computes RPN anchor targets per image,
        and stores the results in ``self.data`` / ``self.label`` as
        ``mx.nd.array`` lists ordered by ``self.data_name`` /
        ``self.label_name``.
        """
        # slice roidb
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        # decide multi device slice
        work_load_list = self.work_load_list
        ctx = self.ctx
        if work_load_list is None:
            work_load_list = [1] * len(ctx)
        assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
            "Invalid settings for work load. "
        slices = _split_input_slice(self.batch_size, work_load_list)
        # get testing data for multigpu
        # each element in the list is the data used by different gpu
        data_list = []
        label_list = []
        for islice in slices:
            iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
            data, label = get_rpn_batch(iroidb)
            data_list.append(data)
            label_list.append(label)
        # pad data first and then assign anchor (read label)
        # tensor_vstack pads every image to the largest H/W in the batch.
        data_tensor = tensor_vstack([batch['data'] for batch in data_list])
        for data, data_pad in zip(data_list, data_tensor):
            data['data'] = data_pad[np.newaxis, :]
        # generate new label data
        new_label_list = []
        num_idx=0
        for data, label in zip(data_list, label_list):
            # infer label shape
            data_shape = {k: v.shape for k, v in data.items()}
            del data_shape['im_info']
            _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
            feat_shape = [int(i) for i in feat_shape[0]]
            # add gt_boxes to data for e2e
            data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
            # roi_info = [slice index, x1, y1, x2, y2 of the FIRST gt box].
            # NOTE(review): only the first box's coordinates are recorded
            # here -- confirm downstream consumers expect exactly that.
            data['roi_info'] = np.array([num_idx,data['gt_boxes'].flatten()[0],
                data['gt_boxes'].flatten()[1], data['gt_boxes'].flatten()[2],
                data['gt_boxes'].flatten()[3]]).flatten()
            #print(data['roi_info'])
            #print(label)
            # assign anchor for label
            label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'],
                                  self.feat_stride, self.anchor_scales,
                                  self.anchor_ratios, self.allowed_border)
            # Class id (column 4) of the first gt box of the first image.
            label['gtlabel'] = data['gt_boxes'][0,0,4].flatten()
            #print("label:")
            #print(label['gtlabel'])
            new_label_list.append(label)
            num_idx = num_idx + 1
        # print(data_list)
        # Stack the per-device dicts into batch-wide arrays.
        all_data = dict()
        for key in self.data_name:
            all_data[key] = tensor_vstack([batch[key] for batch in data_list])
        all_data['roi_info']=all_data['roi_info'].reshape(-1,5)
        #print(all_data['roi_info'])
        all_label = dict()
        for key in self.label_name:
            # -1 marks "ignore" entries when padding the label array.
            pad = -1 if key == 'label' else 0
            all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)
        self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
        self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
# --- file boundary artifact: MXNet anchor-loader iterator above, PyXB facets module below ---
# -*- coding: utf-8 -*-
# Copyright 2009-2013, Peter A. Bigot
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain a
# copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Classes related to XMLSchema facets.
The definitions herein are from sections U{4.2<http://www.w3.org/TR/xmlschema-2/index.html#rf-facets>}
and U{4.3<http://www.w3.org/TR/xmlschema-2/index.html#rf-facets>} of
U{XML Schema Part 2: Datatypes<http://www.w3.org/TR/xmlschema-2/>}.
Facets are attributes of a datatype that constrain its lexical and
value spaces.
"""
import logging
import re
import decimal
import pyxb
from . import datatypes
from . import basis
from pyxb.utils import utility, six
_log = logging.getLogger(__name__)
class Facet (pyxb.cscRoot):
    """The base class for facets.
    This provides association with STDs, a name, and a value for the facet.
    """
    # Schema name of the facet (e.g. 'length'); set by each concrete subclass.
    _Name = None
    @classmethod
    def Name (self):
        """The name of a facet is a class constant."""
        return self._Name
    __baseTypeDefinition = None
    def baseTypeDefinition (self):
        """The SimpleTypeDefinition component restricted by this facet.
        Note: this is NOT the STD to which the facet belongs, but is
        usually that STD's base type. I.e., this jumps us through all
        the containing restrictions and extensions to get to the core
        type definition."""
        return self.__baseTypeDefinition
    __ownerTypeDefinition = None
    def ownerTypeDefinition (self):
        """The SimpleTypeDefinition component to which this facet belongs.
        I.e., the one in which the hasFacet specification was found.
        This value is None if the facet is not associated with an
        STD."""
        return self.__ownerTypeDefinition
    # The default valueDatatype to use for instances of this class.
    # This is overridden in subclasses that do not use late value
    # datatype bindings.
    _ValueDatatype = None
    # The datatype used for facet values.
    __valueDatatype = None
    def valueDatatype (self):
        """Get the datatype used to represent values of the facet.
        This usually has nothing to do with the owner datatype; for
        example, the length facet may apply to any STD but the value
        of the facet is an integer. In generated bindings this is
        usually set explicitly in the facet constructor; when
        processing a schema, it is derived from the value's type
        definition.
        """
        # Fall back to the base STD's Python support class when no
        # explicit value datatype was configured.
        if self.__valueDatatype is None:
            assert self.baseTypeDefinition() is not None
            return self.baseTypeDefinition().pythonSupport()
        return self.__valueDatatype
    __value = None
    def _value (self, v): self.__value = v
    def value (self): return self.__value
    __annotation = None
    def annotation (self): return self.__annotation
    def __init__ (self, **kw):
        """Create a facet instance, initializing it from the keyword parameters."""
        super(Facet, self).__init__(**kw)
        # Can't create base class instances
        assert Facet != self.__class__
        self.setFromKeywords(_reset=True, _constructor=True, **kw)
    def _setFromKeywords_vb (self, **kw):
        """Configure values of the facet from a set of keywords.
        This method is pre-extended; subclasses should invoke the
        parent method after setting their local configuration.
        @keyword _reset: If C{False} or missing, existing values will
                         be retained if they do not appear in the
                         keywords. If C{True}, members not defined in
                         the keywords are set to a default.
        @keyword base_type_definition:
        @keyword owner_type_definition:
        @keyword value_datatype:
        """
        if not kw.get('_reset', False):
            # Preserve current settings for keywords the caller omitted.
            kw.setdefault('base_type_definition', self.__baseTypeDefinition)
            kw.setdefault('owner_type_definition', self.__ownerTypeDefinition)
            kw.setdefault('value_datatype', self.__valueDatatype)
        self.__baseTypeDefinition = kw.get('base_type_definition')
        self.__ownerTypeDefinition = kw.get('owner_type_definition')
        self.__valueDatatype = kw.get('value_datatype', self._ValueDatatype)
        # Verify that there's enough information that we should be
        # able to identify a PST suitable for representing facet
        # values.
        assert (self.__valueDatatype is not None) or (self.__baseTypeDefinition is not None)
        # Continue up the cooperative-superclass chain; the lambda is a
        # no-op terminator when no further _vb implementation exists.
        super_fn = getattr(super(Facet, self), '_setFromKeywords_vb', lambda *a,**kw: self)
        return super_fn(**kw)
    def setFromKeywords (self, **kw):
        """Public entrypoint to the _setFromKeywords_vb call hierarchy."""
        return self._setFromKeywords_vb(**kw)
    @classmethod
    def ClassForFacet (cls, name):
        """Given the name of a facet, return the Facet subclass that represents it."""
        assert cls != Facet
        # Strip any namespace prefix from a QName-style facet name.
        if 0 <= name.find(':'):
            name = name.split(':', 1)[1]
        # Classes follow the naming scheme <prefix>_<facet name>, e.g. CF_length.
        facet_class = globals().get('%s_%s' % (cls._FacetPrefix, name))
        if facet_class is None:
            raise pyxb.LogicError('Unrecognized facet name %s: expect %s' % (name, ','.join([_f._Name for _f in cls.Facets])))
        assert facet_class is not None
        return facet_class
    def _valueString (self):
        # Collection facets render as a comma-joined list of their items.
        if isinstance(self, _CollectionFacet_mixin):
            return six.u(',').join([ six.text_type(_i) for _i in six.iteritems(self) ])
        if (self.valueDatatype() is not None) and (self.value() is not None):
            try:
                return self.valueDatatype().XsdLiteral(self.value())
            except Exception:
                _log.exception('Stringize facet %s produced exception', self.Name())
                raise
        return six.text_type(self.value())
    def __str__ (self):
        rv = []
        rv.append('%s="%s"' % (self.Name(), self._valueString()))
        if isinstance(self, _Fixed_mixin) and self.fixed():
            rv.append('[fixed]')
        return ''.join(rv)
class ConstrainingFacet (Facet):
    """One of the facets defined in section 4.3, which provide
    constraints on the lexical space of a type definition."""
    # The prefix used for Python classes used for a constraining
    # facet. Note that this is not the prefix used when generating a
    # Python class member that specifies a constraining instance, even
    # if it happens to be the same digraph.
    _FacetPrefix = 'CF'
    def __init__ (self, **kw):
        super(ConstrainingFacet, self).__init__(**kw)
    def _validateConstraint_vx (self, value):
        # Subclasses must override; reaching this is a programming error.
        raise pyxb.LogicError("Facet %s does not implement constraints" % (self.Name(),))
    def validateConstraint (self, value):
        """Return True iff the given value satisfies the constraint represented by this facet instance.
        The actual test is delegated to the subclasses."""
        return self._validateConstraint_vx(value)
    def __setFromKeywords(self, **kw):
        # Coerce the supplied 'value' keyword into the facet's value datatype.
        kwv = kw.get('value')
        if kwv is not None:
            vdt = self.valueDatatype()
            if not isinstance(kwv, vdt):
                kwv = vdt(kwv)
            self._value(kwv)
    def _setFromKeywords_vb (self, **kw):
        """Extend base class.
        Additional keywords:
        * value
        """
        # NB: This uses post-extension because it makes reference to the value_data_type
        super_fn = getattr(super(ConstrainingFacet, self), '_setFromKeywords_vb', lambda *a,**kw: self)
        rv = super_fn(**kw)
        self.__setFromKeywords(**kw)
        return rv
class _LateDatatype_mixin (pyxb.cscRoot):
    """Marker class to indicate that the facet instance must be told
    its datatype when it is constructed.
    This is necessary for facets like L{CF_minInclusive} and
    L{CF_minExclusive}, for which the value is determined by the base
    type definition of the associated STD. In some cases the value
    that must be used in the facet cannot be represented in the Python
    type used for the facet; see L{LateDatatypeBindsSuperclass}.
    """
    _LateDatatypeBindsSuperclass = None
    """Subclasses must override this class variable with C{True} or
    C{False}. The value is C{True} iff the value used for the facet is
    not within the value space of the corresponding value datatype; for
    example, L{CF_minExclusive}."""
    @classmethod
    def LateDatatypeBindsSuperclass (cls):
        """Return C{False} if the proposed datatype should be used, or
        C{True} if the base type definition of the proposed datatype
        should be used instead."""
        if cls._LateDatatypeBindsSuperclass is None:
            # BUG FIX: the original message contained a %s placeholder
            # but never supplied the class as a format argument.
            raise pyxb.LogicError('Class %s did not set _LateDatatypeBindsSuperclass variable.' % (cls,))
        return cls._LateDatatypeBindsSuperclass
    @classmethod
    def BindingValueDatatype (cls, value_type):
        """Find the datatype for facet values when this facet is bound
        to the given value_type.
        If the C{value_type} is an STD, the associated Python support
        datatype from this value_type scanning up through the base
        type hierarchy is used.
        """
        import pyxb.xmlschema.structures as structures
        if isinstance(value_type, structures.SimpleTypeDefinition):
            # Back up until we find something that actually has a
            # datatype
            while not value_type.hasPythonSupport():
                value_type = value_type.baseTypeDefinition()
            value_type = value_type.pythonSupport()
        assert issubclass(value_type, basis.simpleTypeDefinition)
        if cls.LateDatatypeBindsSuperclass():
            value_type = value_type.XsdSuperType()
        return value_type
    def bindValueDatatype (self, value_datatype):
        """Late-bind this facet's value datatype from C{value_datatype}."""
        self.setFromKeywords(_constructor=True, value_datatype=self.BindingValueDatatype(value_datatype))
class _Fixed_mixin (pyxb.cscRoot):
    """Mix-in to a constraining facet that adds support for the 'fixed' property."""
    __fixed = None
    def fixed (self):
        """Return the facet's 'fixed' flag, or C{None} when unset."""
        return self.__fixed
    def __setFromKeywords (self, **kw):
        # A reset clears the flag before any new value is considered.
        if kw.get('_reset', False):
            self.__fixed = None
        supplied = kw.get('fixed')
        if supplied is not None:
            self.__fixed = datatypes.boolean(supplied)
    def _setFromKeywords_vb (self, **kw):
        """Extend base class.
        Additional keywords:
        * fixed
        """
        # Pre-extension: handle our keyword, then continue up the chain.
        self.__setFromKeywords(**kw)
        next_fn = getattr(super(_Fixed_mixin, self), '_setFromKeywords_vb', lambda *a,**kw: self)
        return next_fn(**kw)
class _CollectionFacet_mixin (pyxb.cscRoot):
    """Mix-in to handle facets whose values are collections, not scalars.
    For example, the enumeration and pattern facets maintain a list of
    enumeration values and patterns, respectively, as their value
    space.
    Subclasses must define a class variable _CollectionFacet_itemType
    which is a reference to a class that is used to construct members
    of the collection.
    """
    # The collection of item-type instances; (re)created on _reset.
    __items = None
    def _setFromKeywords_vb (self, **kw):
        """Extend base class.
        @keyword _constructor: If C{False} or absent, the object being
                               set is a member of the collection. If
                               C{True}, the object being set is the
                               collection itself.
        """
        if kw.get('_reset', False):
            self.__items = []
        # Without _constructor, the keywords describe one new member:
        # build it with the subclass's item type and append it.
        if not kw.get('_constructor', False):
            self.__items.append(self._CollectionFacet_itemType(facet_instance=self, **kw))
        super_fn = getattr(super(_CollectionFacet_mixin, self), '_setFromKeywords_vb', lambda *a,**kw: self)
        return super_fn(**kw)
    def _items (self):
        """The members of the collection, as a reference."""
        return self.__items
    def items (self):
        """The members of the collection, as a copy."""
        return self.__items[:]
    def iteritems (self):
        """The members of the collection as an iterator"""
        return iter(self.__items)
class CF_length (ConstrainingFacet, _Fixed_mixin):
    """A facet that specifies the length of the lexical representation of a value.
    See U{http://www.w3.org/TR/xmlschema-2/#rf-length}
    """
    _Name = 'length'
    _ValueDatatype = datatypes.nonNegativeInteger
    def _validateConstraint_vx (self, value):
        # Vacuously valid when either side of the comparison is undefined;
        # otherwise require an exact length match.
        actual = value.xsdValueLength()
        limit = self.value()
        if (actual is None) or (limit is None):
            return True
        return actual == limit
class CF_minLength (ConstrainingFacet, _Fixed_mixin):
    """A facet that constrains the length of the lexical representation of a value.
    See U{http://www.w3.org/TR/xmlschema-2/#rf-minLength}
    """
    _Name = 'minLength'
    _ValueDatatype = datatypes.nonNegativeInteger
    def _validateConstraint_vx (self, value):
        # Vacuously valid when either side is undefined; otherwise the
        # lexical length must be at least the configured minimum.
        actual = value.xsdValueLength()
        minimum = self.value()
        if (actual is None) or (minimum is None):
            return True
        return actual >= minimum
class CF_maxLength (ConstrainingFacet, _Fixed_mixin):
    """A facet that constrains the length of the lexical representation of a value.
    See U{http://www.w3.org/TR/xmlschema-2/#rf-maxLength}
    """
    _Name = 'maxLength'
    _ValueDatatype = datatypes.nonNegativeInteger
    def _validateConstraint_vx (self, value):
        # Vacuously valid when the value's length or the facet value is
        # undefined; otherwise the length may not exceed the maximum.
        value_length = value.xsdValueLength()
        return (value_length is None) or (self.value() is None) or (value_length <= self.value())
import pyxb.utils.xmlre
class _PatternElement (utility.PrivateTransient_mixin):
    """This class represents individual patterns that appear within a CF_pattern collection."""
    # The compiled regular expression is marked transient because we
    # normally do development with Python 2.5, and consequently save
    # the pickled namespace archives that go into the distribution
    # with that version. Compiled regular expressions in Python 2.5
    # include a reference to the re._compile method, which does not
    # exist in Python 2.4. As a result, attempts to load a namespace
    # which includes types with pattern restrictions fail.
    __PrivateTransient = set()
    __compiledExpression = None
    __PrivateTransient.add('compiledExpression')
    # Python translation of the XSD pattern; cached at construction.
    __pythonExpression = None
    pattern = None
    annotation = None
    def __init__ (self, pattern=None, value=None, annotation=None, **kw):
        """Record the pattern text and pre-translate it to Python regex syntax.

        C{value} is accepted as an alias for C{pattern} to support
        generic construction through the collection-facet machinery."""
        if pattern is None:
            assert value is not None
            pattern = value
        assert isinstance(pattern, six.string_types)
        self.pattern = pattern
        if isinstance(annotation, six.string_types):
            self.annotation = annotation
        # Translate once up front; compilation is deferred (see matches).
        self.__pythonExpression = pyxb.utils.xmlre.XMLToPython(pattern)
        super(_PatternElement, self).__init__()
    def __str__ (self): return self.pattern
    def matches (self, text):
        # Compile lazily so instances restored from a pickle (where the
        # compiled form is transient) regenerate it on first use.
        if self.__compiledExpression is None:
            self.__compiledExpression = re.compile(self.__pythonExpression)
        return self.__compiledExpression.match(text)
class CF_pattern (ConstrainingFacet, _CollectionFacet_mixin):
    """A facet that constrains the lexical representation of a value
    to match one of a set of patterns.
    See U{http://www.w3.org/TR/xmlschema-2/#rf-pattern}
    @note: In PyXB, pattern constraints are ignored for any type with
    a Python representation that does not derive from a string type.
    This is due to the difficulty in reconstructing the lexical
    representation of a non-string type after it has been converted to
    its value space.
    @todo: On creating new instances of non-string simple types from
    string representations, we could apply pattern constraints. That
    would mean checking them prior to invoking the Factory method.
    """
    _Name = 'pattern'
    _CollectionFacet_itemType = _PatternElement
    _ValueDatatype = datatypes.string
    # NOTE(review): this list is populated only via addPattern and is
    # distinct from the _CollectionFacet_mixin items collection filled
    # through setFromKeywords; validation consults only this list --
    # confirm that split is intentional.
    __patternElements = None
    def patternElements (self): return self.__patternElements
    def __init__ (self, **kw):
        super(CF_pattern, self).__init__(**kw)
        self.__patternElements = []
    def addPattern (self, **kw):
        """Create a L{_PatternElement} from the keywords, register and return it."""
        pattern = self._CollectionFacet_itemType(**kw)
        self.__patternElements.append(pattern)
        return pattern
    def _validateConstraint_vx (self, value):
        # If validation is inhibited, or if the facet hasn't had any
        # restrictions applied yet, return True.
        if 0 == len(self.__patternElements):
            return True
        if not isinstance(value, six.string_types):
            # Ignore pattern constraint when value space and lexical
            # space differ.
            return True
        # The value is valid if ANY registered pattern matches it.
        for pe in self.__patternElements:
            if pe.matches(value):
                return True
        return False
@six.unicode_convertible
class _EnumerationElement (object):
    """This class represents individual values that appear within a
    L{CF_enumeration} collection."""
    __value = None
    def value (self):
        """The Python value that is used for equality testing
        against this enumeration.
        This is an instance of L{enumeration.valueDatatype()<CF_enumeration.valueDatatype>},
        initialized from the unicodeValue."""
        return self.__value
    __tag = None
    def tag (self):
        """The Python identifier used for the named constant representing
        the enumeration value.
        This should include any desired prefix, since it must be
        unique within its binding class. If C{None}, no enumeration
        constant will be generated."""
        return self.__tag
    def _setTag (self, tag):
        """Set the tag to be used for this enumeration."""
        self.__tag = tag
    __enumeration = None
    def enumeration (self):
        """A reference to the L{CF_enumeration} instance that owns this element."""
        return self.__enumeration
    __unicodeValue = None
    def unicodeValue (self):
        """The unicode string that defines the enumeration value."""
        return self.__unicodeValue
    def __init__ (self, enumeration=None, unicode_value=None,
                  description=None, annotation=None, tag=None,
                  **kw):
        # The preferred keyword is "unicode_value", but when being
        # generically applied by
        # structures.SimpleTypeDefinition.__updateFacets, the unicode
        # value comes in through the keyword "value". Similarly for
        # "enumeration" and "facet_instance".
        value = kw.get('value', unicode_value)
        if unicode_value is None:
            unicode_value = value
        if enumeration is None:
            enumeration = kw['facet_instance']
        self.__unicodeValue = unicode_value
        self.__enumeration = enumeration
        self.__description = description
        self.__annotation = annotation
        self.__tag = tag
        assert self.__enumeration is not None
        # Convert the lexical form into the owning facet's value space,
        # skipping constraint validation during bootstrap.
        value_datatype = self.enumeration().valueDatatype()
        self.__value = value_datatype.Factory(value, _validate_constraints=False, _from_xml=True)
        # Fall back to the annotation text as the human-readable description.
        if (self.__description is None) and (self.__annotation is not None):
            self.__description = six.text_type(self.__annotation)
    def __str__ (self):
        return utility.QuotedEscaped(self.unicodeValue())
class CF_enumeration (ConstrainingFacet, _CollectionFacet_mixin, _LateDatatype_mixin):
    """Capture a constraint that restricts valid values to a fixed set.
    A STD that has an enumeration restriction should mix-in
    L{pyxb.binding.basis.enumeration_mixin}, and should have a class
    variable titled C{_CF_enumeration} that is an instance of this
    class.
    "unicode" refers to the Unicode string by which the value is
    represented in XML.
    "tag" refers to the Python member reference associated with the
    enumeration. The value is derived from the unicode value of the
    enumeration element and an optional prefix that identifies the
    owning simple type when the tag is promoted to module-level
    visibility.
    "value" refers to the Python value held in the tag
    See U{http://www.w3.org/TR/xmlschema-2/#rf-enumeration}
    """
    _Name = 'enumeration'
    _CollectionFacet_itemType = _EnumerationElement
    _LateDatatypeBindsSuperclass = False
    # Lookup tables from tag / value / unicode form to their element.
    __tagToElement = None
    __valueToElement = None
    __unicodeToElement = None
    # The prefix to be used when making enumeration tags visible at
    # the module level. If None, tags are not made visible.
    __enumPrefix = None
    def __init__ (self, **kw):
        super(CF_enumeration, self).__init__(**kw)
        self.__enumPrefix = kw.get('enum_prefix', self.__enumPrefix)
        self.__tagToElement = { }
        self.__valueToElement = { }
        self.__unicodeToElement = { }
    def enumPrefix (self):
        """The prefix prepended to tags promoted to module level, or C{None}."""
        return self.__enumPrefix
    def elements (self):
        """@deprecated: Use L{items} or L{iteritems} instead."""
        return list(six.iteritems(self))
    def values (self):
        """Return a list of enumeration values."""
        return [ _ee.value() for _ee in six.iteritems(self) ]
    def itervalues (self):
        """Generate the enumeration values."""
        for ee in six.iteritems(self):
            yield ee.value()
    def addEnumeration (self, **kw):
        """Create an L{_EnumerationElement}, index it, and return its value."""
        kw['enumeration'] = self
        ee = _EnumerationElement(**kw)
        # BUG FIX: the original asserted on ``ee.tag`` (the bound method
        # object), which is never a dict key, so duplicate-tag detection
        # could not fire.  Call the accessor to compare the tag string.
        assert ee.tag() not in self.__tagToElement
        self.__tagToElement[ee.tag()] = ee
        self.__unicodeToElement[ee.unicodeValue()] = ee
        value = ee.value()
        # Not just issubclass(self.valueDatatype(), basis.STD_list);
        # this may be a union with one of those as a member type.
        if isinstance(value, list):
            value = ' '.join([ _v.xsdLiteral() for _v in value ])
        self.__valueToElement[value] = ee
        self._items().append(ee)
        return value
    def elementForValue (self, value):
        """Return the L{_EnumerationElement} instance that has the given value.
        @raise KeyError: the value is not valid for the enumeration."""
        return self.__valueToElement[value]
    def valueForUnicode (self, ustr):
        """Return the enumeration value corresponding to the given unicode string.
        If ustr is not a valid option for this enumeration, return None."""
        rv = self.__unicodeToElement.get(ustr)
        if rv is not None:
            rv = rv.value()
        return rv
    def _validateConstraint_vx (self, value):
        # If validation is inhibited, or if the facet hasn't had any
        # restrictions applied yet, return True.
        if 0 == len(self._items()):
            return True
        for ee in six.iteritems(self):
            if ee.value() == value:
                return True
        return False
class _Enumeration_mixin (pyxb.cscRoot):
    """Marker class to indicate that the generated binding has enumeration members."""
    @classmethod
    def valueForUnicode (cls, ustr):
        # Delegate to the class's CF_enumeration facet; returns None
        # when ustr is not a member of the enumeration.
        return cls._CF_enumeration.valueForUnicode(ustr)
class _WhiteSpace_enum (datatypes.NMTOKEN, _Enumeration_mixin):
    """The enumeration used to constrain the whiteSpace facet"""
    # The preserve/replace/collapse members and the _CF_enumeration
    # facet are attached to this class by the bootstrap code below.
    pass
# Bootstrap the whiteSpace enumeration facet: register the three legal
# values and expose them as class attributes (addEnumeration returns
# the enumeration's value).
_WhiteSpace_enum._CF_enumeration = CF_enumeration(value_datatype=_WhiteSpace_enum)
_WhiteSpace_enum.preserve = _WhiteSpace_enum._CF_enumeration.addEnumeration(unicode_value=six.u('preserve'), tag='preserve')
_WhiteSpace_enum.replace = _WhiteSpace_enum._CF_enumeration.addEnumeration(unicode_value=six.u('replace'), tag='replace')
_WhiteSpace_enum.collapse = _WhiteSpace_enum._CF_enumeration.addEnumeration(unicode_value=six.u('collapse'), tag='collapse')
# NOTE: For correctness we really need to initialize the facet map for
# WhiteSpace_enum, even though at the moment it isn't necessary. We
# can't right now, because its parent datatypes.NMTOKEN hasn't been
# initialized yet
# NOTE(review): the comment above says the call can't be made yet, but
# the next line makes it anyway -- the comment appears stale; confirm.
_WhiteSpace_enum._InitializeFacetMap(_WhiteSpace_enum._CF_enumeration)
class CF_whiteSpace (ConstrainingFacet, _Fixed_mixin):
    """Specify the value-space interpretation of whitespace.
    See U{http://www.w3.org/TR/xmlschema-2/#rf-whiteSpace}
    """
    _Name = 'whiteSpace'
    _ValueDatatype = _WhiteSpace_enum
    __TabCRLF_re = re.compile("[\t\n\r]")
    __MultiSpace_re = re.compile(" +")
    def normalizeString (self, value):
        """Normalize the given string in accordance with the configured whitespace interpretation."""
        mode = self.value()
        if mode is None:
            return value
        if mode == _WhiteSpace_enum.preserve:
            return utility.NormalizeWhitespace(value, preserve=True)
        if mode == _WhiteSpace_enum.replace:
            return utility.NormalizeWhitespace(value, replace=True)
        # Only three modes exist; anything else is a programming error.
        assert mode == _WhiteSpace_enum.collapse, 'Unexpected value "%s" for whiteSpace facet' % (mode,)
        return utility.NormalizeWhitespace(value, collapse=True)
    def _validateConstraint_vx (self, value):
        """No validation rules for whitespace facet."""
        return True
class CF_minInclusive (ConstrainingFacet, _Fixed_mixin, _LateDatatype_mixin):
    """Specify the minimum legal value for the constrained type.
    See U{http://www.w3.org/TR/xmlschema-2/#rf-minInclusive}
    """
    _Name = 'minInclusive'
    _LateDatatypeBindsSuperclass = False
    def _validateConstraint_vx (self, value):
        # An unset bound constrains nothing.
        bound = self.value()
        return bound is None or bound <= value
class CF_maxInclusive (ConstrainingFacet, _Fixed_mixin, _LateDatatype_mixin):
    """Specify the maximum legal value for the constrained type.
    See U{http://www.w3.org/TR/xmlschema-2/#rf-maxInclusive}
    """
    _Name = 'maxInclusive'
    _LateDatatypeBindsSuperclass = False
    def _validateConstraint_vx (self, value):
        # An unset bound constrains nothing.
        bound = self.value()
        return bound is None or bound >= value
class CF_minExclusive (ConstrainingFacet, _Fixed_mixin, _LateDatatype_mixin):
    """Specify the exclusive lower bound of legal values for the constrained type.
    See U{http://www.w3.org/TR/xmlschema-2/#rf-minExclusive}
    """
    _Name = 'minExclusive'
    # The bound itself is excluded from the value space, so its facet
    # value may need the base type's datatype.
    _LateDatatypeBindsSuperclass = True
    def _validateConstraint_vx (self, value):
        bound = self.value()
        return bound is None or bound < value
class CF_maxExclusive (ConstrainingFacet, _Fixed_mixin, _LateDatatype_mixin):
    """Specify the exclusive upper bound of legal values for the constrained type.
    See U{http://www.w3.org/TR/xmlschema-2/#rf-maxExclusive}
    """
    _Name = 'maxExclusive'
    # The bound itself is excluded from the value space, so its facet
    # value may need the base type's datatype.
    _LateDatatypeBindsSuperclass = True
    def _validateConstraint_vx (self, value):
        bound = self.value()
        return bound is None or bound > value
class CF_totalDigits (ConstrainingFacet, _Fixed_mixin):
    """Specify the number of digits in the *value* space of the type.
    See U{http://www.w3.org/TR/xmlschema-2/#rf-totalDigits}
    """
    _Name = 'totalDigits'
    _ValueDatatype = datatypes.positiveInteger
    def _validateConstraint_vx (self, value):
        # No restriction configured: vacuously valid.
        if self.value() is None:
            return True
        if isinstance(value, datatypes.decimal):
            # Exact digit count from the normalized decimal tuple.
            (sign, digits, exponent) = value.normalize().as_tuple()
            if len(digits) > self.value():
                return False
            if 0 > exponent:
                # Fractional digits count toward the total.
                return -exponent <= self.value()
            return (exponent + len(digits)) <= self.value()
        # Non-decimal numerics: scale by powers of ten until the scaled
        # value is integral (all significant digits captured), giving up
        # after the permitted number of digits.
        n = 0
        scale = 1
        match = False
        v = None
        while (n <= self.value()) and (not match):
            v = six.long_type(value * scale)
            match = ((value * scale) == v)
            if self.value() == n:
                break
            n += 1
            scale *= 10
        # Advance scale to 10**totalDigits for the magnitude test below.
        while n < self.value():
            n += 1
            scale *= 10
        # Valid iff an integral scaling was found and the magnitude fits
        # within the allowed number of digits.
        return match and (v is not None) and (abs(v) < scale)
class CF_fractionDigits (ConstrainingFacet, _Fixed_mixin):
    """Specify the number of sub-unit digits in the *value* space of the type.
    See U{http://www.w3.org/TR/xmlschema-2/#rf-fractionDigits}
    """
    _Name = 'fractionDigits'
    _ValueDatatype = datatypes.nonNegativeInteger
    def _validateConstraint_vx (self, value):
        # No restriction configured: vacuously valid.
        if self.value() is None:
            return True
        if isinstance(value, datatypes.decimal):
            # A non-negative exponent means no fractional digits at all;
            # otherwise -exponent is the fractional digit count.
            (sign, digits, exponent) = value.normalize().as_tuple()
            return (0 <= exponent) or (-exponent <= self.value())
        # Non-decimal numerics: the value is valid if scaling by at most
        # 10**fractionDigits makes it integral.
        n = 0
        scale = 1
        while n <= self.value():
            if ((value * scale) == six.long_type(value * scale)):
                return True
            n += 1
            scale *= 10
        return False
class FundamentalFacet (Facet):
    """A fundamental facet provides information on the value space of the associated type."""
    _FacetPrefix = 'FF'
    @classmethod
    def CreateFromDOM (cls, node, owner_type_definition, base_type_definition=None):
        """Create, configure, and return a fundamental facet instance.

        The concrete subclass is selected from the DOM node's C{name}
        attribute."""
        facet_class = cls.ClassForFacet(node.getAttribute('name'))
        rv = facet_class(base_type_definition=base_type_definition,
                         owner_type_definition=owner_type_definition)
        rv.updateFromDOM(node)
        # BUG FIX: the configured instance was never returned, so every
        # caller of this factory received None.
        return rv
    def updateFromDOM (self, node):
        """Validate the node names this facet, then apply its attributes."""
        if not node.hasAttribute('name'):
            raise pyxb.SchemaValidationError('No name attribute in facet')
        assert node.getAttribute('name') == self.Name()
        self._updateFromDOM(node)
    def _updateFromDOM (self, node):
        try:
            super(FundamentalFacet, self)._updateFromDOM(node)
        except AttributeError:
            # Superclass chain provides no _updateFromDOM; nothing to inherit.
            pass
        if (self.valueDatatype() is not None) and node.hasAttribute('value'):
            self._value(self.valueDatatype()(node.getAttribute('value')))
        # @todo
        # NOTE(review): name mangling makes this assignment set
        # _FundamentalFacet__annotation, which is distinct from the
        # _Facet__annotation read by Facet.annotation() -- confirm intent.
        self.__annotation = None
        return self
class FF_equal (FundamentalFacet):
    """Specifies that the associated type supports a notion of equality.
    See U{http://www.w3.org/TR/xmlschema-2/#equal}
    """
    # No value datatype: the facet's presence alone conveys the property.
    _Name = 'equal'
class FF_ordered (FundamentalFacet):
    """Specifies that the associated type supports a notion of order.
    See U{http://www.w3.org/TR/xmlschema-2/#rf-ordered}
    """
    # Legal values per the specification (not currently enforced here).
    _LegalValues = ( 'false', 'partial', 'total' )
    _Name = 'ordered'
    _ValueDatatype = datatypes.string
    def __init__ (self, **kw):
        # @todo: correct value type definition
        super(FF_ordered, self).__init__(**kw)
class FF_bounded (FundamentalFacet):
    """Specifies that the associated type supports a notion of bounds.
    See U{http://www.w3.org/TR/xmlschema-2/#rf-bounded}
    """
    _Name = 'bounded'
    # The facet value is a simple true/false indicator.
    _ValueDatatype = datatypes.boolean
class FF_cardinality (FundamentalFacet):
    """Specifies that the associated type supports a notion of length.
    See U{http://www.w3.org/TR/xmlschema-2/#rf-cardinality}
    """
    # Legal values per the specification (not currently enforced here).
    _LegalValues = ( 'finite', 'countably infinite' )
    _Name = 'cardinality'
    _ValueDatatype = datatypes.string
    def __init__ (self, **kw):
        # @todo correct value type definition
        # NOTE: value_datatype here duplicates _ValueDatatype above.
        super(FF_cardinality, self).__init__(value_datatype=datatypes.string, **kw)
class FF_numeric (FundamentalFacet):
    """Fundamental facet marking types whose values are numbers.

    See U{http://www.w3.org/TR/xmlschema-2/#rf-numeric}
    """
    _Name = 'numeric'
    _ValueDatatype = datatypes.boolean
# The fixed set of expected facets.
# Register the canonical facet classes on their shared base classes so
# generic code can enumerate the known facets of each kind.
ConstrainingFacet.Facets = [
    CF_length, CF_minLength, CF_maxLength, CF_pattern, CF_enumeration,
    CF_whiteSpace, CF_minInclusive, CF_maxInclusive, CF_minExclusive,
    CF_maxExclusive, CF_totalDigits, CF_fractionDigits ]
FundamentalFacet.Facets = [
    FF_equal, FF_ordered, FF_bounded, FF_cardinality, FF_numeric ]
# Facet.Facets is the union of the constraining and fundamental facet lists.
Facet.Facets = []
Facet.Facets.extend(ConstrainingFacet.Facets)
Facet.Facets.extend(FundamentalFacet.Facets)
# Facet details from a hacked generator reading the normative schema
# and only printing the facet-related code.
#
# NOTE(review): generated code — each block below attaches the constraining
# facet instances applicable to one built-in XSD datatype, then calls
# _InitializeFacetMap with them so the type knows which facets constrain it.
# Do not hand-edit individual entries; regenerate instead.
datatypes.ENTITIES._CF_pattern = CF_pattern()
datatypes.ENTITIES._CF_maxLength = CF_maxLength()
datatypes.ENTITIES._CF_enumeration = CF_enumeration(value_datatype=datatypes.ENTITIES)
datatypes.ENTITIES._CF_minLength = CF_minLength(value=datatypes.nonNegativeInteger(1))
datatypes.ENTITIES._CF_whiteSpace = CF_whiteSpace()
datatypes.ENTITIES._CF_length = CF_length()
datatypes.ENTITIES._InitializeFacetMap(datatypes.ENTITIES._CF_pattern,
    datatypes.ENTITIES._CF_maxLength,
    datatypes.ENTITIES._CF_enumeration,
    datatypes.ENTITIES._CF_minLength,
    datatypes.ENTITIES._CF_whiteSpace,
    datatypes.ENTITIES._CF_length)
# ENTITY/ID/IDREF add no facets of their own; they inherit via their base types.
datatypes.ENTITY._InitializeFacetMap()
datatypes.ID._InitializeFacetMap()
datatypes.IDREF._InitializeFacetMap()
datatypes.IDREFS._CF_pattern = CF_pattern()
datatypes.IDREFS._CF_maxLength = CF_maxLength()
datatypes.IDREFS._CF_enumeration = CF_enumeration(value_datatype=datatypes.IDREFS)
datatypes.IDREFS._CF_minLength = CF_minLength(value=datatypes.nonNegativeInteger(1))
datatypes.IDREFS._CF_whiteSpace = CF_whiteSpace()
datatypes.IDREFS._CF_length = CF_length()
datatypes.IDREFS._InitializeFacetMap(datatypes.IDREFS._CF_pattern,
    datatypes.IDREFS._CF_maxLength,
    datatypes.IDREFS._CF_enumeration,
    datatypes.IDREFS._CF_minLength,
    datatypes.IDREFS._CF_whiteSpace,
    datatypes.IDREFS._CF_length)
datatypes.NCName._CF_pattern = CF_pattern()
datatypes.NCName._CF_pattern.addPattern(pattern=six.u('[\\i-[:]][\\c-[:]]*'))
datatypes.NCName._InitializeFacetMap(datatypes.NCName._CF_pattern)
datatypes.NMTOKEN._CF_pattern = CF_pattern()
datatypes.NMTOKEN._CF_pattern.addPattern(pattern=six.u('\\c+'))
datatypes.NMTOKEN._InitializeFacetMap(datatypes.NMTOKEN._CF_pattern)
datatypes.NMTOKENS._CF_pattern = CF_pattern()
datatypes.NMTOKENS._CF_maxLength = CF_maxLength()
datatypes.NMTOKENS._CF_enumeration = CF_enumeration(value_datatype=datatypes.NMTOKENS)
datatypes.NMTOKENS._CF_minLength = CF_minLength(value=datatypes.nonNegativeInteger(1))
datatypes.NMTOKENS._CF_whiteSpace = CF_whiteSpace()
datatypes.NMTOKENS._CF_length = CF_length()
datatypes.NMTOKENS._InitializeFacetMap(datatypes.NMTOKENS._CF_pattern,
    datatypes.NMTOKENS._CF_maxLength,
    datatypes.NMTOKENS._CF_enumeration,
    datatypes.NMTOKENS._CF_minLength,
    datatypes.NMTOKENS._CF_whiteSpace,
    datatypes.NMTOKENS._CF_length)
datatypes.NOTATION._CF_minLength = CF_minLength()
datatypes.NOTATION._CF_maxLength = CF_maxLength()
datatypes.NOTATION._CF_enumeration = CF_enumeration(value_datatype=datatypes.NOTATION)
datatypes.NOTATION._CF_pattern = CF_pattern()
datatypes.NOTATION._CF_whiteSpace = CF_whiteSpace(value=_WhiteSpace_enum.collapse)
datatypes.NOTATION._CF_length = CF_length()
datatypes.NOTATION._InitializeFacetMap(datatypes.NOTATION._CF_minLength,
    datatypes.NOTATION._CF_maxLength,
    datatypes.NOTATION._CF_enumeration,
    datatypes.NOTATION._CF_pattern,
    datatypes.NOTATION._CF_whiteSpace,
    datatypes.NOTATION._CF_length)
datatypes.Name._CF_pattern = CF_pattern()
datatypes.Name._CF_pattern.addPattern(pattern=six.u('\\i\\c*'))
datatypes.Name._InitializeFacetMap(datatypes.Name._CF_pattern)
datatypes.QName._CF_minLength = CF_minLength()
datatypes.QName._CF_maxLength = CF_maxLength()
datatypes.QName._CF_enumeration = CF_enumeration(value_datatype=datatypes.QName)
datatypes.QName._CF_pattern = CF_pattern()
datatypes.QName._CF_whiteSpace = CF_whiteSpace(value=_WhiteSpace_enum.collapse)
datatypes.QName._CF_length = CF_length()
datatypes.QName._InitializeFacetMap(datatypes.QName._CF_minLength,
    datatypes.QName._CF_maxLength,
    datatypes.QName._CF_enumeration,
    datatypes.QName._CF_pattern,
    datatypes.QName._CF_whiteSpace,
    datatypes.QName._CF_length)
datatypes.anyURI._CF_minLength = CF_minLength()
datatypes.anyURI._CF_maxLength = CF_maxLength()
datatypes.anyURI._CF_enumeration = CF_enumeration(value_datatype=datatypes.anyURI)
datatypes.anyURI._CF_pattern = CF_pattern()
datatypes.anyURI._CF_whiteSpace = CF_whiteSpace(value=_WhiteSpace_enum.collapse)
datatypes.anyURI._CF_length = CF_length()
datatypes.anyURI._InitializeFacetMap(datatypes.anyURI._CF_minLength,
    datatypes.anyURI._CF_maxLength,
    datatypes.anyURI._CF_enumeration,
    datatypes.anyURI._CF_pattern,
    datatypes.anyURI._CF_whiteSpace,
    datatypes.anyURI._CF_length)
datatypes.base64Binary._CF_minLength = CF_minLength()
datatypes.base64Binary._CF_maxLength = CF_maxLength()
datatypes.base64Binary._CF_enumeration = CF_enumeration(value_datatype=datatypes.base64Binary)
datatypes.base64Binary._CF_pattern = CF_pattern()
datatypes.base64Binary._CF_whiteSpace = CF_whiteSpace(value=_WhiteSpace_enum.collapse)
datatypes.base64Binary._CF_length = CF_length()
datatypes.base64Binary._InitializeFacetMap(datatypes.base64Binary._CF_minLength,
    datatypes.base64Binary._CF_maxLength,
    datatypes.base64Binary._CF_enumeration,
    datatypes.base64Binary._CF_pattern,
    datatypes.base64Binary._CF_whiteSpace,
    datatypes.base64Binary._CF_length)
datatypes.boolean._CF_whiteSpace = CF_whiteSpace(value=_WhiteSpace_enum.collapse)
datatypes.boolean._CF_pattern = CF_pattern()
datatypes.boolean._InitializeFacetMap(datatypes.boolean._CF_whiteSpace,
    datatypes.boolean._CF_pattern)
datatypes.byte._CF_minInclusive = CF_minInclusive(value_datatype=datatypes.byte, value=datatypes.anySimpleType(six.u('-128')))
datatypes.byte._CF_maxInclusive = CF_maxInclusive(value_datatype=datatypes.byte, value=datatypes.anySimpleType(six.u('127')))
datatypes.byte._InitializeFacetMap(datatypes.byte._CF_minInclusive,
    datatypes.byte._CF_maxInclusive)
# NOTE(review): for the ordered types below the exclusive bounds use
# anySimpleType as their value datatype while the inclusive bounds use the
# owning datatype — this mirrors the generator's output, not an edit.
datatypes.date._CF_pattern = CF_pattern()
datatypes.date._CF_minInclusive = CF_minInclusive(value_datatype=datatypes.date)
datatypes.date._CF_maxExclusive = CF_maxExclusive(value_datatype=datatypes.anySimpleType)
datatypes.date._CF_minExclusive = CF_minExclusive(value_datatype=datatypes.anySimpleType)
datatypes.date._CF_enumeration = CF_enumeration(value_datatype=datatypes.date)
datatypes.date._CF_whiteSpace = CF_whiteSpace(value=_WhiteSpace_enum.collapse)
datatypes.date._CF_maxInclusive = CF_maxInclusive(value_datatype=datatypes.date)
datatypes.date._InitializeFacetMap(datatypes.date._CF_pattern,
    datatypes.date._CF_minInclusive,
    datatypes.date._CF_maxExclusive,
    datatypes.date._CF_minExclusive,
    datatypes.date._CF_enumeration,
    datatypes.date._CF_whiteSpace,
    datatypes.date._CF_maxInclusive)
datatypes.dateTime._CF_pattern = CF_pattern()
datatypes.dateTime._CF_minInclusive = CF_minInclusive(value_datatype=datatypes.dateTime)
datatypes.dateTime._CF_maxExclusive = CF_maxExclusive(value_datatype=datatypes.anySimpleType)
datatypes.dateTime._CF_minExclusive = CF_minExclusive(value_datatype=datatypes.anySimpleType)
datatypes.dateTime._CF_enumeration = CF_enumeration(value_datatype=datatypes.dateTime)
datatypes.dateTime._CF_whiteSpace = CF_whiteSpace(value=_WhiteSpace_enum.collapse)
datatypes.dateTime._CF_maxInclusive = CF_maxInclusive(value_datatype=datatypes.dateTime)
datatypes.dateTime._InitializeFacetMap(datatypes.dateTime._CF_pattern,
    datatypes.dateTime._CF_minInclusive,
    datatypes.dateTime._CF_maxExclusive,
    datatypes.dateTime._CF_minExclusive,
    datatypes.dateTime._CF_enumeration,
    datatypes.dateTime._CF_whiteSpace,
    datatypes.dateTime._CF_maxInclusive)
datatypes.decimal._CF_totalDigits = CF_totalDigits()
datatypes.decimal._CF_pattern = CF_pattern()
datatypes.decimal._CF_minInclusive = CF_minInclusive(value_datatype=datatypes.decimal)
datatypes.decimal._CF_maxExclusive = CF_maxExclusive(value_datatype=datatypes.anySimpleType)
datatypes.decimal._CF_minExclusive = CF_minExclusive(value_datatype=datatypes.anySimpleType)
datatypes.decimal._CF_enumeration = CF_enumeration(value_datatype=datatypes.decimal)
datatypes.decimal._CF_whiteSpace = CF_whiteSpace(value=_WhiteSpace_enum.collapse)
datatypes.decimal._CF_fractionDigits = CF_fractionDigits()
datatypes.decimal._CF_maxInclusive = CF_maxInclusive(value_datatype=datatypes.decimal)
datatypes.decimal._InitializeFacetMap(datatypes.decimal._CF_totalDigits,
    datatypes.decimal._CF_pattern,
    datatypes.decimal._CF_minInclusive,
    datatypes.decimal._CF_maxExclusive,
    datatypes.decimal._CF_minExclusive,
    datatypes.decimal._CF_enumeration,
    datatypes.decimal._CF_whiteSpace,
    datatypes.decimal._CF_fractionDigits,
    datatypes.decimal._CF_maxInclusive)
datatypes.double._CF_pattern = CF_pattern()
datatypes.double._CF_minInclusive = CF_minInclusive(value_datatype=datatypes.double)
datatypes.double._CF_maxExclusive = CF_maxExclusive(value_datatype=datatypes.anySimpleType)
datatypes.double._CF_minExclusive = CF_minExclusive(value_datatype=datatypes.anySimpleType)
datatypes.double._CF_enumeration = CF_enumeration(value_datatype=datatypes.double)
datatypes.double._CF_whiteSpace = CF_whiteSpace(value=_WhiteSpace_enum.collapse)
datatypes.double._CF_maxInclusive = CF_maxInclusive(value_datatype=datatypes.double)
datatypes.double._InitializeFacetMap(datatypes.double._CF_pattern,
    datatypes.double._CF_minInclusive,
    datatypes.double._CF_maxExclusive,
    datatypes.double._CF_minExclusive,
    datatypes.double._CF_enumeration,
    datatypes.double._CF_whiteSpace,
    datatypes.double._CF_maxInclusive)
datatypes.duration._CF_pattern = CF_pattern()
datatypes.duration._CF_minInclusive = CF_minInclusive(value_datatype=datatypes.duration)
datatypes.duration._CF_maxExclusive = CF_maxExclusive(value_datatype=datatypes.anySimpleType)
datatypes.duration._CF_minExclusive = CF_minExclusive(value_datatype=datatypes.anySimpleType)
datatypes.duration._CF_enumeration = CF_enumeration(value_datatype=datatypes.duration)
datatypes.duration._CF_whiteSpace = CF_whiteSpace(value=_WhiteSpace_enum.collapse)
datatypes.duration._CF_maxInclusive = CF_maxInclusive(value_datatype=datatypes.duration)
datatypes.duration._InitializeFacetMap(datatypes.duration._CF_pattern,
    datatypes.duration._CF_minInclusive,
    datatypes.duration._CF_maxExclusive,
    datatypes.duration._CF_minExclusive,
    datatypes.duration._CF_enumeration,
    datatypes.duration._CF_whiteSpace,
    datatypes.duration._CF_maxInclusive)
datatypes.float._CF_pattern = CF_pattern()
datatypes.float._CF_minInclusive = CF_minInclusive(value_datatype=datatypes.float)
datatypes.float._CF_maxExclusive = CF_maxExclusive(value_datatype=datatypes.anySimpleType)
datatypes.float._CF_minExclusive = CF_minExclusive(value_datatype=datatypes.anySimpleType)
datatypes.float._CF_enumeration = CF_enumeration(value_datatype=datatypes.float)
datatypes.float._CF_whiteSpace = CF_whiteSpace(value=_WhiteSpace_enum.collapse)
datatypes.float._CF_maxInclusive = CF_maxInclusive(value_datatype=datatypes.float)
datatypes.float._InitializeFacetMap(datatypes.float._CF_pattern,
    datatypes.float._CF_minInclusive,
    datatypes.float._CF_maxExclusive,
    datatypes.float._CF_minExclusive,
    datatypes.float._CF_enumeration,
    datatypes.float._CF_whiteSpace,
    datatypes.float._CF_maxInclusive)
datatypes.gDay._CF_pattern = CF_pattern()
datatypes.gDay._CF_minInclusive = CF_minInclusive(value_datatype=datatypes.gDay)
datatypes.gDay._CF_maxExclusive = CF_maxExclusive(value_datatype=datatypes.anySimpleType)
datatypes.gDay._CF_minExclusive = CF_minExclusive(value_datatype=datatypes.anySimpleType)
datatypes.gDay._CF_enumeration = CF_enumeration(value_datatype=datatypes.gDay)
datatypes.gDay._CF_whiteSpace = CF_whiteSpace(value=_WhiteSpace_enum.collapse)
datatypes.gDay._CF_maxInclusive = CF_maxInclusive(value_datatype=datatypes.gDay)
datatypes.gDay._InitializeFacetMap(datatypes.gDay._CF_pattern,
    datatypes.gDay._CF_minInclusive,
    datatypes.gDay._CF_maxExclusive,
    datatypes.gDay._CF_minExclusive,
    datatypes.gDay._CF_enumeration,
    datatypes.gDay._CF_whiteSpace,
    datatypes.gDay._CF_maxInclusive)
datatypes.gMonth._CF_pattern = CF_pattern()
datatypes.gMonth._CF_minInclusive = CF_minInclusive(value_datatype=datatypes.gMonth)
datatypes.gMonth._CF_maxExclusive = CF_maxExclusive(value_datatype=datatypes.anySimpleType)
datatypes.gMonth._CF_minExclusive = CF_minExclusive(value_datatype=datatypes.anySimpleType)
datatypes.gMonth._CF_enumeration = CF_enumeration(value_datatype=datatypes.gMonth)
datatypes.gMonth._CF_whiteSpace = CF_whiteSpace(value=_WhiteSpace_enum.collapse)
datatypes.gMonth._CF_maxInclusive = CF_maxInclusive(value_datatype=datatypes.gMonth)
datatypes.gMonth._InitializeFacetMap(datatypes.gMonth._CF_pattern,
    datatypes.gMonth._CF_minInclusive,
    datatypes.gMonth._CF_maxExclusive,
    datatypes.gMonth._CF_minExclusive,
    datatypes.gMonth._CF_enumeration,
    datatypes.gMonth._CF_whiteSpace,
    datatypes.gMonth._CF_maxInclusive)
datatypes.gMonthDay._CF_pattern = CF_pattern()
datatypes.gMonthDay._CF_minInclusive = CF_minInclusive(value_datatype=datatypes.gMonthDay)
datatypes.gMonthDay._CF_maxExclusive = CF_maxExclusive(value_datatype=datatypes.anySimpleType)
datatypes.gMonthDay._CF_minExclusive = CF_minExclusive(value_datatype=datatypes.anySimpleType)
datatypes.gMonthDay._CF_enumeration = CF_enumeration(value_datatype=datatypes.gMonthDay)
datatypes.gMonthDay._CF_whiteSpace = CF_whiteSpace(value=_WhiteSpace_enum.collapse)
datatypes.gMonthDay._CF_maxInclusive = CF_maxInclusive(value_datatype=datatypes.gMonthDay)
datatypes.gMonthDay._InitializeFacetMap(datatypes.gMonthDay._CF_pattern,
    datatypes.gMonthDay._CF_minInclusive,
    datatypes.gMonthDay._CF_maxExclusive,
    datatypes.gMonthDay._CF_minExclusive,
    datatypes.gMonthDay._CF_enumeration,
    datatypes.gMonthDay._CF_whiteSpace,
    datatypes.gMonthDay._CF_maxInclusive)
datatypes.gYear._CF_pattern = CF_pattern()
datatypes.gYear._CF_minInclusive = CF_minInclusive(value_datatype=datatypes.gYear)
datatypes.gYear._CF_maxExclusive = CF_maxExclusive(value_datatype=datatypes.anySimpleType)
datatypes.gYear._CF_minExclusive = CF_minExclusive(value_datatype=datatypes.anySimpleType)
datatypes.gYear._CF_enumeration = CF_enumeration(value_datatype=datatypes.gYear)
datatypes.gYear._CF_whiteSpace = CF_whiteSpace(value=_WhiteSpace_enum.collapse)
datatypes.gYear._CF_maxInclusive = CF_maxInclusive(value_datatype=datatypes.gYear)
datatypes.gYear._InitializeFacetMap(datatypes.gYear._CF_pattern,
    datatypes.gYear._CF_minInclusive,
    datatypes.gYear._CF_maxExclusive,
    datatypes.gYear._CF_minExclusive,
    datatypes.gYear._CF_enumeration,
    datatypes.gYear._CF_whiteSpace,
    datatypes.gYear._CF_maxInclusive)
datatypes.gYearMonth._CF_pattern = CF_pattern()
datatypes.gYearMonth._CF_minInclusive = CF_minInclusive(value_datatype=datatypes.gYearMonth)
datatypes.gYearMonth._CF_maxExclusive = CF_maxExclusive(value_datatype=datatypes.anySimpleType)
datatypes.gYearMonth._CF_minExclusive = CF_minExclusive(value_datatype=datatypes.anySimpleType)
datatypes.gYearMonth._CF_enumeration = CF_enumeration(value_datatype=datatypes.gYearMonth)
datatypes.gYearMonth._CF_whiteSpace = CF_whiteSpace(value=_WhiteSpace_enum.collapse)
datatypes.gYearMonth._CF_maxInclusive = CF_maxInclusive(value_datatype=datatypes.gYearMonth)
datatypes.gYearMonth._InitializeFacetMap(datatypes.gYearMonth._CF_pattern,
    datatypes.gYearMonth._CF_minInclusive,
    datatypes.gYearMonth._CF_maxExclusive,
    datatypes.gYearMonth._CF_minExclusive,
    datatypes.gYearMonth._CF_enumeration,
    datatypes.gYearMonth._CF_whiteSpace,
    datatypes.gYearMonth._CF_maxInclusive)
datatypes.hexBinary._CF_minLength = CF_minLength()
datatypes.hexBinary._CF_maxLength = CF_maxLength()
datatypes.hexBinary._CF_enumeration = CF_enumeration(value_datatype=datatypes.hexBinary)
datatypes.hexBinary._CF_pattern = CF_pattern()
datatypes.hexBinary._CF_whiteSpace = CF_whiteSpace(value=_WhiteSpace_enum.collapse)
datatypes.hexBinary._CF_length = CF_length()
datatypes.hexBinary._InitializeFacetMap(datatypes.hexBinary._CF_minLength,
    datatypes.hexBinary._CF_maxLength,
    datatypes.hexBinary._CF_enumeration,
    datatypes.hexBinary._CF_pattern,
    datatypes.hexBinary._CF_whiteSpace,
    datatypes.hexBinary._CF_length)
# Integral types get the fixed inclusive bounds from the XSD specification.
datatypes.int._CF_minInclusive = CF_minInclusive(value_datatype=datatypes.int, value=datatypes.anySimpleType(six.u('-2147483648')))
datatypes.int._CF_maxInclusive = CF_maxInclusive(value_datatype=datatypes.int, value=datatypes.anySimpleType(six.u('2147483647')))
datatypes.int._InitializeFacetMap(datatypes.int._CF_minInclusive,
    datatypes.int._CF_maxInclusive)
datatypes.integer._CF_pattern = CF_pattern()
datatypes.integer._CF_pattern.addPattern(pattern=six.u('[\\-+]?[0-9]+'))
datatypes.integer._CF_fractionDigits = CF_fractionDigits(value=datatypes.nonNegativeInteger(0))
datatypes.integer._InitializeFacetMap(datatypes.integer._CF_pattern,
    datatypes.integer._CF_fractionDigits)
datatypes.language._CF_pattern = CF_pattern()
datatypes.language._CF_pattern.addPattern(pattern=six.u('[a-zA-Z]{1,8}(-[a-zA-Z0-9]{1,8})*'))
datatypes.language._InitializeFacetMap(datatypes.language._CF_pattern)
datatypes.long._CF_minInclusive = CF_minInclusive(value_datatype=datatypes.long, value=datatypes.anySimpleType(six.u('-9223372036854775808')))
datatypes.long._CF_maxInclusive = CF_maxInclusive(value_datatype=datatypes.long, value=datatypes.anySimpleType(six.u('9223372036854775807')))
datatypes.long._InitializeFacetMap(datatypes.long._CF_minInclusive,
    datatypes.long._CF_maxInclusive)
datatypes.negativeInteger._CF_maxInclusive = CF_maxInclusive(value_datatype=datatypes.negativeInteger, value=datatypes.anySimpleType(six.u('-1')))
datatypes.negativeInteger._InitializeFacetMap(datatypes.negativeInteger._CF_maxInclusive)
datatypes.nonNegativeInteger._CF_minInclusive = CF_minInclusive(value_datatype=datatypes.nonNegativeInteger, value=datatypes.anySimpleType(six.u('0')))
datatypes.nonNegativeInteger._InitializeFacetMap(datatypes.nonNegativeInteger._CF_minInclusive)
datatypes.nonPositiveInteger._CF_maxInclusive = CF_maxInclusive(value_datatype=datatypes.nonPositiveInteger, value=datatypes.anySimpleType(six.u('0')))
datatypes.nonPositiveInteger._InitializeFacetMap(datatypes.nonPositiveInteger._CF_maxInclusive)
datatypes.normalizedString._CF_whiteSpace = CF_whiteSpace(value=_WhiteSpace_enum.replace)
datatypes.normalizedString._InitializeFacetMap(datatypes.normalizedString._CF_whiteSpace)
datatypes.positiveInteger._CF_minInclusive = CF_minInclusive(value_datatype=datatypes.positiveInteger, value=datatypes.anySimpleType(six.u('1')))
datatypes.positiveInteger._InitializeFacetMap(datatypes.positiveInteger._CF_minInclusive)
datatypes.short._CF_minInclusive = CF_minInclusive(value_datatype=datatypes.short, value=datatypes.anySimpleType(six.u('-32768')))
datatypes.short._CF_maxInclusive = CF_maxInclusive(value_datatype=datatypes.short, value=datatypes.anySimpleType(six.u('32767')))
datatypes.short._InitializeFacetMap(datatypes.short._CF_minInclusive,
    datatypes.short._CF_maxInclusive)
# string is the only type here whose whiteSpace facet preserves whitespace.
datatypes.string._CF_minLength = CF_minLength()
datatypes.string._CF_maxLength = CF_maxLength()
datatypes.string._CF_enumeration = CF_enumeration(value_datatype=datatypes.string)
datatypes.string._CF_pattern = CF_pattern()
datatypes.string._CF_whiteSpace = CF_whiteSpace(value=_WhiteSpace_enum.preserve)
datatypes.string._CF_length = CF_length()
datatypes.string._InitializeFacetMap(datatypes.string._CF_minLength,
    datatypes.string._CF_maxLength,
    datatypes.string._CF_enumeration,
    datatypes.string._CF_pattern,
    datatypes.string._CF_whiteSpace,
    datatypes.string._CF_length)
datatypes.time._CF_pattern = CF_pattern()
datatypes.time._CF_minInclusive = CF_minInclusive(value_datatype=datatypes.time)
datatypes.time._CF_maxExclusive = CF_maxExclusive(value_datatype=datatypes.anySimpleType)
datatypes.time._CF_minExclusive = CF_minExclusive(value_datatype=datatypes.anySimpleType)
datatypes.time._CF_enumeration = CF_enumeration(value_datatype=datatypes.time)
datatypes.time._CF_whiteSpace = CF_whiteSpace(value=_WhiteSpace_enum.collapse)
datatypes.time._CF_maxInclusive = CF_maxInclusive(value_datatype=datatypes.time)
datatypes.time._InitializeFacetMap(datatypes.time._CF_pattern,
    datatypes.time._CF_minInclusive,
    datatypes.time._CF_maxExclusive,
    datatypes.time._CF_minExclusive,
    datatypes.time._CF_enumeration,
    datatypes.time._CF_whiteSpace,
    datatypes.time._CF_maxInclusive)
datatypes.token._CF_whiteSpace = CF_whiteSpace(value=_WhiteSpace_enum.collapse)
datatypes.token._InitializeFacetMap(datatypes.token._CF_whiteSpace)
# Unsigned types only need an upper inclusive bound; the lower bound comes
# from nonNegativeInteger in the type hierarchy.
datatypes.unsignedByte._CF_maxInclusive = CF_maxInclusive(value_datatype=datatypes.unsignedByte, value=datatypes.anySimpleType(six.u('255')))
datatypes.unsignedByte._InitializeFacetMap(datatypes.unsignedByte._CF_maxInclusive)
datatypes.unsignedInt._CF_maxInclusive = CF_maxInclusive(value_datatype=datatypes.unsignedInt, value=datatypes.anySimpleType(six.u('4294967295')))
datatypes.unsignedInt._InitializeFacetMap(datatypes.unsignedInt._CF_maxInclusive)
datatypes.unsignedLong._CF_maxInclusive = CF_maxInclusive(value_datatype=datatypes.unsignedLong, value=datatypes.anySimpleType(six.u('18446744073709551615')))
datatypes.unsignedLong._InitializeFacetMap(datatypes.unsignedLong._CF_maxInclusive)
datatypes.unsignedShort._CF_maxInclusive = CF_maxInclusive(value_datatype=datatypes.unsignedShort, value=datatypes.anySimpleType(six.u('65535')))
datatypes.unsignedShort._InitializeFacetMap(datatypes.unsignedShort._CF_maxInclusive)
| |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import neighbor
class neighbors(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/neighbors. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Configuration for BGP neighbors
    """

    # __slots__ keeps generated containers lightweight; "__neighbor" is
    # name-mangled to "_neighbors__neighbor".
    __slots__ = ("_path_helper", "_extmethods", "__neighbor")

    _yang_name = "neighbors"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        # Default construction carries no path helper or extension methods.
        self._path_helper = False
        self._extmethods = False
        # Keyed YANG list of per-peer configuration, keyed by neighbor-address.
        self.__neighbor = YANGDynClass(
            base=YANGListType(
                "neighbor_address",
                neighbor.neighbor,
                yang_name="neighbor",
                parent=self,
                is_container="list",
                user_ordered=False,
                path_helper=self._path_helper,
                yang_keys="neighbor-address",
                extensions=None,
            ),
            is_container="list",
            yang_name="neighbor",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="list",
            is_config=True,
        )

        load = kwargs.pop("load", None)
        if args:
            # Copy-construction: at most one source object is accepted, and
            # it must expose every generated element of this container.
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Skip elements the source object never changed from default.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Path of this container in the instantiated tree; falls back to the
        # absolute schema path when no parent has been registered.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "bgp",
                "neighbors",
            ]

    def _get_neighbor(self):
        """
        Getter method for neighbor, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor (list)

        YANG Description: List of BGP neighbors configured on the local system,
        uniquely identified by peer IPv[46] address
        """
        return self.__neighbor

    def _set_neighbor(self, v, load=False):
        """
        Setter method for neighbor, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor (list)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_neighbor is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_neighbor() directly.

        YANG Description: List of BGP neighbors configured on the local system,
        uniquely identified by peer IPv[46] address
        """
        # Unwrap a previously-wrapped dynamic value before revalidating it.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=YANGListType(
                    "neighbor_address",
                    neighbor.neighbor,
                    yang_name="neighbor",
                    parent=self,
                    is_container="list",
                    user_ordered=False,
                    path_helper=self._path_helper,
                    yang_keys="neighbor-address",
                    extensions=None,
                ),
                is_container="list",
                yang_name="neighbor",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="list",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """neighbor must be of a type compatible with list""",
                    "defined-type": "list",
                    "generated-type": """YANGDynClass(base=YANGListType("neighbor_address",neighbor.neighbor, yang_name="neighbor", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='neighbor-address', extensions=None), is_container='list', yang_name="neighbor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=True)""",
                }
            )

        self.__neighbor = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_neighbor(self):
        # Restore the element to a fresh, default-constructed list.
        self.__neighbor = YANGDynClass(
            base=YANGListType(
                "neighbor_address",
                neighbor.neighbor,
                yang_name="neighbor",
                parent=self,
                is_container="list",
                user_ordered=False,
                path_helper=self._path_helper,
                yang_keys="neighbor-address",
                extensions=None,
            ),
            is_container="list",
            yang_name="neighbor",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="list",
            is_config=True,
        )

    # Rebinds the class-scope name "neighbor" (previously the imported
    # submodule) to the generated accessor property.
    neighbor = __builtin__.property(_get_neighbor, _set_neighbor)

    _pyangbind_elements = OrderedDict([("neighbor", neighbor)])
from . import neighbor
class neighbors(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/neighbors. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Configuration for BGP neighbors
    """

    # __slots__ keeps generated containers lightweight; "__neighbor" is
    # name-mangled to "_neighbors__neighbor".
    __slots__ = ("_path_helper", "_extmethods", "__neighbor")

    _yang_name = "neighbors"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        # Default construction carries no path helper or extension methods.
        self._path_helper = False
        self._extmethods = False
        # Keyed YANG list of per-peer configuration, keyed by neighbor-address.
        self.__neighbor = YANGDynClass(
            base=YANGListType(
                "neighbor_address",
                neighbor.neighbor,
                yang_name="neighbor",
                parent=self,
                is_container="list",
                user_ordered=False,
                path_helper=self._path_helper,
                yang_keys="neighbor-address",
                extensions=None,
            ),
            is_container="list",
            yang_name="neighbor",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="list",
            is_config=True,
        )

        load = kwargs.pop("load", None)
        if args:
            # Copy-construction: at most one source object is accepted, and
            # it must expose every generated element of this container.
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Skip elements the source object never changed from default.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Path of this container in the instantiated tree; falls back to the
        # absolute schema path when no parent has been registered.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "bgp",
                "neighbors",
            ]

    def _get_neighbor(self):
        """
        Getter method for neighbor, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor (list)

        YANG Description: List of BGP neighbors configured on the local system,
        uniquely identified by peer IPv[46] address
        """
        return self.__neighbor

    def _set_neighbor(self, v, load=False):
        """
        Setter method for neighbor, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor (list)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_neighbor is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_neighbor() directly.

        YANG Description: List of BGP neighbors configured on the local system,
        uniquely identified by peer IPv[46] address
        """
        # Unwrap a previously-wrapped dynamic value before revalidating it.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=YANGListType(
                    "neighbor_address",
                    neighbor.neighbor,
                    yang_name="neighbor",
                    parent=self,
                    is_container="list",
                    user_ordered=False,
                    path_helper=self._path_helper,
                    yang_keys="neighbor-address",
                    extensions=None,
                ),
                is_container="list",
                yang_name="neighbor",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="list",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """neighbor must be of a type compatible with list""",
                    "defined-type": "list",
                    "generated-type": """YANGDynClass(base=YANGListType("neighbor_address",neighbor.neighbor, yang_name="neighbor", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='neighbor-address', extensions=None), is_container='list', yang_name="neighbor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=True)""",
                }
            )

        self.__neighbor = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_neighbor(self):
        # Restore the element to a fresh, default-constructed list.
        self.__neighbor = YANGDynClass(
            base=YANGListType(
                "neighbor_address",
                neighbor.neighbor,
                yang_name="neighbor",
                parent=self,
                is_container="list",
                user_ordered=False,
                path_helper=self._path_helper,
                yang_keys="neighbor-address",
                extensions=None,
            ),
            is_container="list",
            yang_name="neighbor",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="list",
            is_config=True,
        )

    # Rebinds the class-scope name "neighbor" (previously the imported
    # submodule) to the generated accessor property.
    neighbor = __builtin__.property(_get_neighbor, _set_neighbor)

    _pyangbind_elements = OrderedDict([("neighbor", neighbor)])
| |
"""
CNN for sentence modeling described in paper:
A Convolutional Neural Network for Modeling Sentence
"""
import sys, os, time
import pdb
import math, random
import numpy as np
import theano
import theano.tensor as T
from util import (load_data, dump_params)
from logreg import LogisticRegression
class WordEmbeddingLayer(object):
"""
Layer that takes input vectors, output the sentence matrix
"""
def __init__(self, rng,
input,
vocab_size,
embed_dm,
embeddings = None,
):
"""
input: theano.tensor.dmatrix, (number of instances, sentence word number)
vocab_size: integer, the size of vocabulary,
embed_dm: integer, the dimension of word vector representation
embeddings: theano.tensor.TensorType
pretrained embeddings
"""
if embeddings:
print "Use pretrained embeddings: ON"
assert embeddings.get_value().shape == (vocab_size, embed_dm), "%r != %r" %(
embeddings.get_value().shape,
(vocab_size, embed_dm)
)
self.embeddings = embeddings
else:
print "Use pretrained embeddings: OFF"
embedding_val = np.asarray(
rng.normal(0, 0.05, size = (vocab_size, embed_dm)),
dtype = theano.config.floatX
)
embedding_val[vocab_size-1,:] = 0 # the <PADDING> character is intialized to 0
self.embeddings = theano.shared(
np.asarray(embedding_val,
dtype = theano.config.floatX),
borrow = True,
name = 'embeddings'
)
self.params = [self.embeddings]
self.param_shapes = [(vocab_size, embed_dm)]
# Return:
# :type, theano.tensor.tensor4
# :param, dimension(1, 1, word embedding dimension, number of words in sentence)
# made to be 4D to fit into the dimension of convolution operation
sent_embedding_list, updates = theano.map(lambda sent: self.embeddings[sent],
input)
sent_embedding_tensor = T.stacklists(sent_embedding_list) # make it into a 3D tensor
self.output = sent_embedding_tensor.dimshuffle(0, 'x', 2, 1) # make it a 4D tensor
class ConvFoldingPoolLayer(object):
    """
    Convolution, folding and k-max pooling layer (one stage of the DCNN).
    """
    def __init__(self,
                 rng,
                 input,
                 filter_shape,
                 k,
                 activation = "tanh",
                 norm_w = True,
                 fold = 0,
                 W = None,
                 b = None):
        """
        rng: numpy random number generator
        input: theano.tensor.tensor4
               the sentence matrix, (number of instances, number of input feature maps, embedding dimension, number of words)
        filter_shape: tuple of length 4,
           dimension: (number of filters, num input feature maps, filter height, filter width)
        k: int or theano.tensor.iscalar,
           the k value in the max-pooling layer
        activation: str
           the activation unit type, `tanh` or `relu` or 'sigmoid'
        norm_w: bool
           whether use fan-in fan-out initialization or not. Default, True
           If not True, use `normal(0, 0.05, size)`
        fold: int, 0 or 1
           fold or not
        W: theano.tensor.tensor4,
           the filter weight matrices (pass to share weights with another layer),
           dimension: (number of filters, num input feature maps, filter height, filter width)
        b: theano.tensor.vector,
           the filter bias (pass to share with another layer),
           dimension: (filter number, )
        """
        self.input = input
        self.k = k
        self.filter_shape = filter_shape
        self.fold_flag = fold
        assert activation in ('tanh', 'relu', 'sigmoid')
        self.activation = activation
        if W is not None:
            # caller-supplied weights, e.g. the prediction path sharing the
            # (scaled) filters of the dropout training path
            self.W = W
        else:
            if norm_w:
                # use fan-in fan-out (Glorot-style) uniform initialization
                fan_in = np.prod(filter_shape[1:])
                fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) /
                           k)  # k-max pooling keeps only k outputs per map
                W_bound = np.sqrt(6. / (fan_in + fan_out))
                W_val = np.asarray(
                    rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                    dtype=theano.config.floatX
                )
            else:
                # normal initialization
                W_val = np.asarray(
                    rng.normal(0, 0.05, size = filter_shape),
                    dtype=theano.config.floatX
                )
            self.W = theano.shared(
                value = np.asarray(W_val,
                                   dtype = theano.config.floatX),
                name = "W",
                borrow=True
            )
        # make b
        if b is not None:
            # NOTE(review): when b is a Theano variable, b.shape is symbolic,
            # so param_shapes holds a symbolic shape in that case — confirm
            # downstream consumers (e.g. AdaDelta accumulators) never see it.
            b_val = b
            b_size = b.shape
            self.b = b
        else:
            b_size = (filter_shape[0], )
            b_val = np.zeros(b_size)
            self.b = theano.shared(
                value = np.asarray(
                    b_val,
                    dtype = theano.config.floatX
                ),
                name = "b",
                borrow = True
            )
        # trainable parameters and their shapes (used by the optimizer)
        self.params = [self.W, self.b]
        self.param_shapes = [filter_shape,
                             b_size ]
    def fold(self, x):
        """
        Fold: average every adjacent pair of rows, halving the embedding
        dimension (axis 2).
        :type x: theano.tensor.tensor4
        """
        return (x[:, :, T.arange(0, x.shape[2], 2)] +
                x[:, :, T.arange(1, x.shape[2], 2)]) / 2
    def k_max_pool(self, x, k):
        """
        perform k-max pool on the input along the rows (axis 3),
        preserving the original left-to-right order of the kept values
        input: theano.tensor.tensor4
        k: theano.tensor.iscalar
           the k parameter
        Returns:
        4D tensor
        """
        # indices of the k largest entries per row; re-sort them so the
        # selected values keep their original word order
        ind = T.argsort(x, axis = 3)
        sorted_ind = T.sort(ind[:,:,:, -k:], axis = 3)
        dim0, dim1, dim2, dim3 = sorted_ind.shape
        # build flattened index vectors for the first three axes so that
        # advanced indexing picks one value per (i, j, r, chosen-column)
        indices_dim0 = T.arange(dim0).repeat(dim1 * dim2 * dim3)
        indices_dim1 = T.arange(dim1).repeat(dim2 * dim3).reshape((dim1*dim2*dim3, 1)).repeat(dim0, axis=1).T.flatten()
        indices_dim2 = T.arange(dim2).repeat(dim3).reshape((dim2*dim3, 1)).repeat(dim0 * dim1, axis = 1).T.flatten()
        return x[indices_dim0, indices_dim1, indices_dim2, sorted_ind.flatten()].reshape(sorted_ind.shape)
    @property
    def output(self):
        # non-linear transform of the convolution output:
        # conv ("full" mode widens the sentence) -> optional fold ->
        # k-max pool -> bias -> activation
        conv_out = T.nnet.conv.conv2d(self.input,
                                      self.W,
                                      border_mode = "full")
        if self.fold_flag:
            # fold
            fold_out = self.fold(conv_out)
        else:
            fold_out = conv_out
        # k-max pool, then add the per-filter bias (broadcast over the
        # instance, row and column axes)
        pool_out = (self.k_max_pool(fold_out, self.k) +
                    self.b.dimshuffle('x', 0, 'x', 'x'))
        # around 0.
        # why tanh becomes extreme?
        if self.activation == "tanh":
            # return theano.printing.Print("tanh(pool_out)")(T.tanh(pool_out))
            return T.tanh(pool_out)
        elif self.activation == "sigmoid":
            return T.nnet.sigmoid(pool_out)
        else:
            # relu
            return T.switch(pool_out > 0, pool_out, 0)
class DropoutLayer(object):
    """
    Randomly zeroes entries of ``input`` with probability ``dropout_rate``.

    Based on: https://github.com/mdenil/dropout/blob/master/mlp.py
    """
    def __init__(self, input, rng, dropout_rate):
        # Seed the symbolic RNG from the numpy generator so runs stay
        # reproducible across the whole network.
        stream = theano.tensor.shared_randomstreams.RandomStreams(
            rng.randint(999999))
        # Bernoulli mask with success prob (1 - dropout_rate): a 1 means
        # "keep the unit", so p is the keep probability, not the drop one.
        keep_mask = stream.binomial(n=1,
                                    p=1 - dropout_rate,
                                    size=input.shape)
        # Cast the int mask so the product stays in floatX (no upcast).
        self.output = input * T.cast(keep_mask, theano.config.floatX)
def train_and_test(args, print_config):
    """
    Build the DCNN (embedding -> conv/fold/k-max-pool stack -> logistic
    regression), train it with AdaDelta + dropout + early stopping, and
    report dev/test error (plus optional diagnostics/plots).

    args: argparse.Namespace with the hyper-parameters parsed in __main__
    print_config: dict of str -> 0/1 flags selecting which diagnostics
        (nnl, L2, activation/gradient tracking, histograms, plots) to compute
    """
    # one filter width / kernel count / fold flag / k value per conv layer;
    # L2_regs additionally covers the embedding and output layers ("- 2")
    assert args.conv_layer_n == len(args.filter_widths) == len(args.nkerns) == (len(args.L2_regs) - 2) == len(args.fold_flags) == len(args.ks)
    # \mod{dim, 2^{\sum fold_flags}} == 0
    # each fold halves the embedding dimension, so it must divide evenly
    assert args.embed_dm % (2 ** sum(args.fold_flags)) == 0
    ###################
    # get the data    #
    ###################
    datasets = load_data(args.corpus_path)
    train_set_x, train_set_y = datasets[0]
    dev_set_x, dev_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]
    word2index = datasets[3]
    index2word = datasets[4]
    pretrained_embeddings = datasets[5]
    # Python 2 integer division: any partial trailing batch is dropped
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / args.batch_size
    n_dev_batches = dev_set_x.get_value(borrow=True).shape[0] / args.dev_test_batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] / args.dev_test_batch_size
    train_sent_len = train_set_x.get_value(borrow=True).shape[1]
    possible_labels = set(train_set_y.get_value().tolist())
    if args.use_pretrained_embedding:
        # the pretrained matrix dictates the embedding dimension
        args.embed_dm = pretrained_embeddings.get_value().shape[1]
    ###################################
    # Symbolic variable definition    #
    ###################################
    x = T.imatrix('x')  # the word indices matrix
    y = T.ivector('y')  # the sentiment labels
    batch_index = T.iscalar('batch_index')
    rng = np.random.RandomState(1234)
    ###############################
    # Construction of the network #
    ###############################
    # Layer 1, the embedding layer
    layer1 = WordEmbeddingLayer(rng,
                                input = x,
                                vocab_size = len(word2index),
                                embed_dm = args.embed_dm,
                                embeddings = (
                                    pretrained_embeddings
                                    if args.use_pretrained_embedding else None
                                )
    )
    # two parallel stacks: `dropout_layers` is the training path,
    # `layers` is the weight-shared, scaled prediction path
    dropout_layers = [layer1]
    layers = [layer1]
    for i in xrange(args.conv_layer_n):
        fold_flag = args.fold_flags[i]
        # for the dropout layer
        # NOTE(review): dropout_rates[0] is used for every conv layer here,
        # while the print below reports dropout_rates[i] — confirm intended.
        dpl = DropoutLayer(
            input = dropout_layers[-1].output,
            rng = rng,
            dropout_rate = args.dropout_rates[0]
        )
        next_layer_dropout_input = dpl.output
        next_layer_input = layers[-1].output
        # for the conv layer
        filter_shape = (
            args.nkerns[i],
            (1 if i == 0 else args.nkerns[i-1]),
            1,
            args.filter_widths[i]
        )
        k = args.ks[i]
        print "For conv layer(%s) %d, filter shape = %r, k = %d, dropout_rate = %f and normalized weight init: %r and fold: %d" %(
            args.conv_activation_unit,
            i+2,
            filter_shape,
            k,
            args.dropout_rates[i],
            args.norm_w,
            fold_flag
        )
        # we have two layers adding to two paths respectively,
        # one for training
        # the other for prediction (averaged model)
        dropout_conv_layer = ConvFoldingPoolLayer(rng,
                                                  input = next_layer_dropout_input,
                                                  filter_shape = filter_shape,
                                                  k = k,
                                                  norm_w = args.norm_w,
                                                  fold = fold_flag,
                                                  activation = args.conv_activation_unit)
        # for prediction
        # sharing weight with dropout layer
        conv_layer = ConvFoldingPoolLayer(rng,
                                          input = next_layer_input,
                                          filter_shape = filter_shape,
                                          k = k,
                                          activation = args.conv_activation_unit,
                                          fold = fold_flag,
                                          W = dropout_conv_layer.W * (1 - args.dropout_rates[i]), # model averaging
                                          b = dropout_conv_layer.b
        )
        dropout_layers.append(dropout_conv_layer)
        layers.append(conv_layer)
    # last, the output layer
    # both dropout and without dropout
    # input size = maps * k values kept * (embedding dim after folding)
    if sum(args.fold_flags) > 0:
        n_in = args.nkerns[-1] * args.ks[-1] * args.embed_dm / (2**sum(args.fold_flags))
    else:
        n_in = args.nkerns[-1] * args.ks[-1] * args.embed_dm
    print "For output layer, n_in = %d, dropout_rate = %f" %(n_in, args.dropout_rates[-1])
    dropout_output_layer = LogisticRegression(
        rng,
        input = dropout_layers[-1].output.flatten(2),
        n_in = n_in, # divided by 2x(how many times are folded)
        n_out = len(possible_labels) # five sentiment level
    )
    output_layer = LogisticRegression(
        rng,
        input = layers[-1].output.flatten(2),
        n_in = n_in,
        n_out = len(possible_labels),
        W = dropout_output_layer.W * (1 - args.dropout_rates[-1]), # sharing the parameters, don't forget
        b = dropout_output_layer.b
    )
    dropout_layers.append(dropout_output_layer)
    layers.append(output_layer)
    ###############################
    # Error and cost              #
    ###############################
    # cost and error come from different model!
    # (train on the dropout path, evaluate on the averaged path)
    dropout_cost = dropout_output_layer.nnl(y)
    errors = output_layer.errors(y)
    def prepare_L2_sqr(param_layers, L2_regs):
        # sum of per-layer L2 penalties over the main weight of each layer
        # (the embedding matrix for layer 1, W elsewhere)
        assert len(L2_regs) == len(param_layers)
        return T.sum([
            L2_reg / 2 * ((layer.W if hasattr(layer, "W") else layer.embeddings) ** 2).sum()
            for L2_reg, layer in zip(L2_regs, param_layers)
        ])
    L2_sqr = prepare_L2_sqr(dropout_layers, args.L2_regs)
    # variant that excludes the embedding layer (used while its learning
    # is delayed)
    L2_sqr_no_ebd = prepare_L2_sqr(dropout_layers[1:], args.L2_regs[1:])
    if args.use_L2_reg:
        cost = dropout_cost + L2_sqr
        cost_no_ebd = dropout_cost + L2_sqr_no_ebd
    else:
        cost = dropout_cost
        cost_no_ebd = dropout_cost
    ###############################
    # Parameters to be used       #
    ###############################
    print "Delay embedding learning by %d epochs" %(args.embedding_learning_delay_epochs)
    print "param_layers: %r" %dropout_layers
    param_layers = dropout_layers
    ##############################
    # Parameter Update           #
    ##############################
    print "Using AdaDelta with rho = %f and epsilon = %f" %(args.rho, args.epsilon)
    params = [param for layer in param_layers for param in layer.params]
    param_shapes= [param for layer in param_layers for param in layer.param_shapes]
    param_grads = [T.grad(cost, param) for param in params]
    # AdaDelta parameter update
    # E[g^2]: running average of squared gradients, initialized to zero
    egs = [
        theano.shared(
            value = np.zeros(param_shape,
                             dtype = theano.config.floatX
            ),
            borrow = True,
            name = "Eg:" + param.name
        )
        for param_shape, param in zip(param_shapes, params)
    ]
    # E[\delta x^2]: running average of squared updates, initialized to zero
    exs = [
        theano.shared(
            value = np.zeros(param_shape,
                             dtype = theano.config.floatX
            ),
            borrow = True,
            name = "Ex:" + param.name
        )
        for param_shape, param in zip(param_shapes, params)
    ]
    new_egs = [
        args.rho * eg + (1 - args.rho) * g ** 2
        for eg, g in zip(egs, param_grads)
    ]
    # AdaDelta step: -RMS[dx]/RMS[g] * g
    delta_x = [
        -(T.sqrt(ex + args.epsilon) / T.sqrt(new_eg + args.epsilon)) * g
        for new_eg, ex, g in zip(new_egs, exs, param_grads)
    ]
    new_exs = [
        args.rho * ex + (1 - args.rho) * (dx ** 2)
        for ex, dx in zip(exs, delta_x)
    ]
    egs_updates = zip(egs, new_egs)
    exs_updates = zip(exs, new_exs)
    param_updates = [
        (p, p + dx)
        for dx, g, p in zip(delta_x, param_grads, params)
    ]
    updates = egs_updates + exs_updates + param_updates
    # updates WITHOUT embedding
    # exclude the embedding parameter (assumed to be params[0])
    egs_updates_no_ebd = zip(egs[1:], new_egs[1:])
    exs_updates_no_ebd = zip(exs[1:], new_exs[1:])
    param_updates_no_ebd = [
        (p, p + dx)
        for dx, g, p in zip(delta_x, param_grads, params)[1:]
    ]
    updates_no_emb = egs_updates_no_ebd + exs_updates_no_ebd + param_updates_no_ebd
    def make_train_func(cost, updates):
        # one training step over the minibatch selected by `batch_index`
        return theano.function(inputs = [batch_index],
                               outputs = [cost],
                               updates = updates,
                               givens = {
                                   x: train_set_x[batch_index * args.batch_size: (batch_index + 1) * args.batch_size],
                                   y: train_set_y[batch_index * args.batch_size: (batch_index + 1) * args.batch_size]
                               }
        )
    train_model_no_ebd = make_train_func(cost_no_ebd, updates_no_emb)
    train_model = make_train_func(cost, updates)
    def make_error_func(x_val, y_val):
        # classification error over the whole given split in one call
        return theano.function(inputs = [],
                               outputs = errors,
                               givens = {
                                   x: x_val,
                                   y: y_val
                               },
        )
    dev_error = make_error_func(dev_set_x, dev_set_y)
    test_error = make_error_func(test_set_x, test_set_y)
    #############################
    # Debugging purpose code    #
    #############################
    # : PARAMETER TUNING NOTE:
    # some demonstration of the gradient vanishing problem
    # shared `givens` mappings for the diagnostic functions below
    train_data_at_index = {
        x: train_set_x[batch_index * args.batch_size: (batch_index + 1) * args.batch_size],
    }
    train_data_at_index_with_y = {
        x: train_set_x[batch_index * args.batch_size: (batch_index + 1) * args.batch_size],
        y: train_set_y[batch_index * args.batch_size: (batch_index + 1) * args.batch_size]
    }
    if print_config["nnl"]:
        get_nnl = theano.function(
            inputs = [batch_index],
            outputs = dropout_cost,
            givens = {
                x: train_set_x[batch_index * args.batch_size: (batch_index + 1) * args.batch_size],
                y: train_set_y[batch_index * args.batch_size: (batch_index + 1) * args.batch_size]
            }
        )
    if print_config["L2_sqr"]:
        get_L2_sqr = theano.function(
            inputs = [],
            outputs = L2_sqr
        )
        get_L2_sqr_no_ebd = theano.function(
            inputs = [],
            outputs = L2_sqr_no_ebd
        )
    if print_config["grad_abs_mean"]:
        # prints mean |grad| per parameter over the WHOLE training set
        print_grads = theano.function(
            inputs = [],
            outputs = [theano.printing.Print(param.name)(
                T.mean(T.abs_(param_grad))
            )
                       for param, param_grad in zip(params, param_grads)
            ],
            givens = {
                x: train_set_x,
                y: train_set_y
            }
        )
    # hidden-layer activations / weight gradients (conv layers only)
    activations = [
        l.output
        for l in dropout_layers[1:-1]
    ]
    weight_grads = [
        T.grad(cost, l.W)
        for l in dropout_layers[1:-1]
    ]
    if print_config["activation_hist"]:
        # turn into 1D array
        get_activations = theano.function(
            inputs = [batch_index],
            outputs = [
                val.flatten(1)
                for val in activations
            ],
            givens = train_data_at_index
        )
    if print_config["weight_grad_hist"]:
        # turn into 1D array
        get_weight_grads = theano.function(
            inputs = [batch_index],
            outputs = [
                val.flatten(1)
                for val in weight_grads
            ],
            givens = train_data_at_index_with_y
        )
    if print_config["activation_tracking"]:
        # get the mean and variance of activations for each conv layer
        get_activation_mean = theano.function(
            inputs = [batch_index],
            outputs = [
                T.mean(val)
                for val in activations
            ],
            givens = train_data_at_index
        )
        get_activation_std = theano.function(
            inputs = [batch_index],
            outputs = [
                T.std(val)
                for val in activations
            ],
            givens = train_data_at_index
        )
    if print_config["weight_grad_tracking"]:
        # get the mean and variance of weight gradients for each conv layer
        get_weight_grad_mean = theano.function(
            inputs = [batch_index],
            outputs = [
                T.mean(g)
                for g in weight_grads
            ],
            givens = train_data_at_index_with_y
        )
        get_weight_grad_std = theano.function(
            inputs = [batch_index],
            outputs = [
                T.std(g)
                for g in weight_grads
            ],
            givens = train_data_at_index_with_y
        )
    # the training loop (early stopping in the classic "patience" style)
    patience = args.patience  # look at this many examples regardless
    patience_increase = 2  # wait this much longer when a new best is
                           # found
    improvement_threshold = 0.995  # a relative improvement of this much is
                                   # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
    best_validation_loss = np.inf
    best_iter = 0
    start_time = time.clock()
    done_looping = False
    epoch = 0
    # accumulators for the optional diagnostics
    nnls = []
    L2_sqrs = []
    activation_means = [[] for i in xrange(args.conv_layer_n)]
    activation_stds = [[] for i in xrange(args.conv_layer_n)]
    weight_grad_means = [[] for i in xrange(args.conv_layer_n)]
    weight_grad_stds = [[] for i in xrange(args.conv_layer_n)]
    activation_hist_data = [[] for i in xrange(args.conv_layer_n)]
    weight_grad_hist_data = [[] for i in xrange(args.conv_layer_n)]
    train_errors = []
    dev_errors = []
    try:
        print "validation_frequency = %d" %validation_frequency
        while (epoch < args.n_epochs):
            epoch += 1
            print "At epoch {0}".format(epoch)
            if epoch == (args.embedding_learning_delay_epochs + 1):
                print "########################"
                print "Start training embedding"
                print "########################"
            # shuffle the training data (x and y with the same permutation)
            train_set_x_data = train_set_x.get_value(borrow = True)
            train_set_y_data = train_set_y.get_value(borrow = True)
            permutation = np.random.permutation(train_set_x.get_value(borrow=True).shape[0])
            train_set_x.set_value(train_set_x_data[permutation])
            train_set_y.set_value(train_set_y_data[permutation])
            for minibatch_index in xrange(n_train_batches):
                # freeze the embedding for the first few epochs
                if epoch >= (args.embedding_learning_delay_epochs + 1):
                    train_cost = train_model(minibatch_index)
                else:
                    train_cost = train_model_no_ebd(minibatch_index)
                iter = (epoch - 1) * n_train_batches + minibatch_index
                if (iter + 1) % validation_frequency == 0:
                    # train_error_val = np.mean([train_error(i)
                    #                            for i in xrange(n_train_batches)])
                    dev_error_val = dev_error()
                    # print "At epoch %d and minibatch %d. \nTrain error %.2f%%\nDev error %.2f%%\n" %(
                    #     epoch,
                    #     minibatch_index,
                    #     train_error_val * 100,
                    #     dev_error_val * 100
                    # )
                    print "At epoch %d and minibatch %d. \nDev error %.2f%%\n" %(
                        epoch,
                        minibatch_index,
                        dev_error_val * 100
                    )
                    # train_errors.append(train_error_val)
                    dev_errors.append(dev_error_val)
                    if dev_error_val < best_validation_loss:
                        best_iter = iter
                        #improve patience if loss improvement is good enough
                        if dev_error_val < best_validation_loss * \
                           improvement_threshold:
                            patience = max(patience, iter * patience_increase)
                        best_validation_loss = dev_error_val
                        # evaluate test error only when dev improves
                        test_error_val = test_error()
                        print(
                            (
                                '     epoch %i, minibatch %i/%i, test error of'
                                ' best dev error %f %%'
                            ) %
                            (
                                epoch,
                                minibatch_index + 1,
                                n_train_batches,
                                test_error_val * 100.
                            )
                        )
                        print "Dumping model to %s" %(args.model_path)
                        dump_params(params, args.model_path)
                # periodic progress report (every 50 minibatches and at the
                # end of the epoch)
                if (minibatch_index+1) % 50 == 0 or minibatch_index == n_train_batches - 1:
                    print "%d / %d minibatches completed" %(minibatch_index + 1, n_train_batches)
                    if print_config["nnl"]:
                        print "`nnl` for the past 50 minibatches is %f" %(np.mean(np.array(nnls)))
                        nnls = []
                    if print_config["L2_sqr"]:
                        print "`L2_sqr`` for the past 50 minibatches is %f" %(np.mean(np.array(L2_sqrs)))
                        L2_sqrs = []
                ##################
                # Plotting stuff #
                ##################
                if print_config["nnl"]:
                    nnl = get_nnl(minibatch_index)
                    # print "nll for batch %d: %f" %(minibatch_index, nnl)
                    nnls.append(nnl)
                if print_config["L2_sqr"]:
                    if epoch >= (args.embedding_learning_delay_epochs + 1):
                        L2_sqrs.append(get_L2_sqr())
                    else:
                        L2_sqrs.append(get_L2_sqr_no_ebd())
                if print_config["activation_tracking"]:
                    layer_means = get_activation_mean(minibatch_index)
                    layer_stds = get_activation_std(minibatch_index)
                    for layer_ms, layer_ss, layer_m, layer_s in zip(activation_means, activation_stds, layer_means, layer_stds):
                        layer_ms.append(layer_m)
                        layer_ss.append(layer_s)
                if print_config["weight_grad_tracking"]:
                    layer_means = get_weight_grad_mean(minibatch_index)
                    layer_stds = get_weight_grad_std(minibatch_index)
                    for layer_ms, layer_ss, layer_m, layer_s in zip(weight_grad_means, weight_grad_stds, layer_means, layer_stds):
                        layer_ms.append(layer_m)
                        layer_ss.append(layer_s)
                if print_config["activation_hist"]:
                    for layer_hist, layer_data in zip(activation_hist_data , get_activations(minibatch_index)):
                        layer_hist += layer_data.tolist()
                if print_config["weight_grad_hist"]:
                    for layer_hist, layer_data in zip(weight_grad_hist_data , get_weight_grads(minibatch_index)):
                        layer_hist += layer_data.tolist()
    except:
        # deliberately broad: report the failure but still produce the
        # plots/summary below (errors are not re-raised)
        import traceback
        traceback.print_exc(file = sys.stdout)
    finally:
        from plot_util import (plot_hist,
                               plot_track,
                               plot_error_vs_epoch,
                               plt)
        if print_config["activation_tracking"]:
            plot_track(activation_means,
                       activation_stds,
                       "activation_tracking")
        if print_config["weight_grad_tracking"]:
            plot_track(weight_grad_means,
                       weight_grad_stds,
                       "weight_grad_tracking")
        if print_config["activation_hist"]:
            plot_hist(activation_hist_data, "activation_hist")
        if print_config["weight_grad_hist"]:
            plot_hist(weight_grad_hist_data, "weight_grad_hist")
        if print_config["error_vs_epoch"]:
            train_errors = [0] * len(dev_errors)
            ax = plot_error_vs_epoch(train_errors, dev_errors,
                                     title = ('Best dev score: %f %% '
                                              ' at iter %i with test error %f %%') %(
                                         best_validation_loss * 100., best_iter + 1, test_error_val * 100.
                                     )
            )
            if not args.task_signature:
                plt.show()
            else:
                plt.savefig("plots/" + args.task_signature + ".png")
    # NOTE(review): test_error_val is unbound if no validation round ever
    # improved on the initial best — the summary below would then raise.
    end_time = time.clock()
    print(('Optimization complete. Best validation score of %f %% '
           'obtained at iteration %i, with test performance %f %%') %
          (best_validation_loss * 100., best_iter + 1, test_error_val * 100.))
    # save the result
    with open(args.output, "a") as f:
        f.write("%s\t%f\t%f\n" %(args.task_signature, best_validation_loss, test_error_val))
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
if __name__ == "__main__":
print_config = {
"adadelta_lr_mean": 0,
"adagrad_lr_mean": 0,
"embeddings": 0,
"logreg_W": 0,
"logreg_b": 0,
"conv_layer1_W": 0,
"conv_layer2_W": 0,
"activation_tracking": 0, # the activation value, mean and variance
"weight_grad_tracking": 0, # the weight gradient tracking
"backprop_grad_tracking": 0, # the backpropagated gradient, mean and variance. In this case, grad propagated from layer 2 to layer 1
"activation_hist": 0, # the activation value, mean and variance
"weight_grad_hist": 0, # the weight gradient tracking
"backprop_grad_hist": 0,
"error_vs_epoch": 1,
"l1_output": 0,
"dropout_l1_output": 0,
"l2_output": 0,
"dropout_l2_output": 0,
"l3_output": 0,
"p_y_given_x": 0,
"grad_abs_mean": 0,
"nnl": 1,
"L2_sqr": 1,
"param_weight_mean": 0,
}
import argparse, sys
parser = argparse.ArgumentParser(description = "CNN with k-max pooling for sentence classification")
parser.add_argument('--corpus_path', type=str,
required = True,
help = 'Path of preprocessed corpus'
)
parser.add_argument('--model_path', type=str,
required = True,
help = 'Path of model parameters'
)
parser.add_argument("--fold", type=int, default = [1,1], nargs="+",
dest = "fold_flags",
help = "Flags that turn on/off folding"
)
parser.add_argument("--ext_ebd", action = "store_true",
dest = "use_pretrained_embedding",
help = "Use external/pretrained word embedding or not. For unkown reasons, type checking does not work for this argument"
)
parser.add_argument("--l2", action = "store_true",
dest = "use_L2_reg",
help = "Use L2 regularization or not"
)
parser.add_argument("--lr", type=float, default = 0.001,
dest = "learning_rate",
help = "Learning rate if constant learning rate is applied"
)
parser.add_argument("--norm_w", action = "store_true",
help = "Normalized initial weight as descripted in Glorot's paper"
)
parser.add_argument("--ebd_delay_epoch", type=int, default = 4,
dest = "embedding_learning_delay_epochs",
help = "Embedding learning delay epochs"
)
parser.add_argument("--au", type=str, default = "tanh",
dest = "conv_activation_unit",
help = "Activation unit type for the convolution layer"
)
parser.add_argument("--eps", type=float, default =0.000001,
dest = "epsilon",
help = "Epsilon used by AdaDelta"
)
parser.add_argument("--rho", type=float, default = 0.95,
help = "Rho used by AdaDelta"
)
parser.add_argument("--ebd_dm", type=int, default = 48,
dest = "embed_dm",
help = "Dimension for word embedding"
)
parser.add_argument("--batch_size", type=int, default = 10,
dest = "batch_size",
help = "Batch size in the stochastic gradient descent"
)
parser.add_argument("--dev_test_batch_size", type=int, default = 1000,
help = "Batch size for dev/test data"
)
parser.add_argument("--n_epochs", type=int, default =20,
help = "Maximum number of epochs to perform during training"
)
parser.add_argument("--dr", type=float, default = [0.2, 0.5, 0.5], nargs="+",
dest = "dropout_rates",
help = "Dropout rates at all layers except output layer"
)
parser.add_argument("--l2_regs", type = float, default = [0.00001, 0.0003, 0.0003, 0.0001], nargs="+",
dest = "L2_regs",
help = "L2 regularization parameters at each layer. left/low->right/high"
)
parser.add_argument("--ks", type = int, default = [15, 6], nargs="+",
help = "The k values of the k-max pooling operation"
)
parser.add_argument("--conv_layer_n", type=int, default = 2,
help = "Number of convolution layers"
)
parser.add_argument("--nkerns", type=int, default = [6,12], nargs="+",
help = "Number of feature maps at each conv layer"
)
parser.add_argument("--filter_widths", type=int, default = [10,7], nargs="+",
help = "Filter width for each conv layer"
)
parser.add_argument("--task_signature", type=str,
help = "The prefix of the saved images."
)
parser.add_argument("--output", type=str,
required = True,
help = "The output file path to save the result"
)
parser.add_argument("--patience", type=int,
default = 5000,
help = "Patience parameter used for early stopping"
)
args = parser.parse_args(sys.argv[1:])
print "Configs:\n-------------\n"
for attr, value in vars(args).items():
print "%s: %r" %(
attr.ljust(25),
value
)
train_and_test(
args,
print_config
)
| |
import os, struct,subprocess,io
from operator import itemgetter, attrgetter
def readStringAt(loc,f):
    """
    Read a NUL-terminated string from file ``f`` starting at offset ``loc``.

    Stops at the first NUL byte or at end-of-file; the terminator is not
    included in the returned string.
    """
    f.seek(loc)
    pieces = []
    while True:
        ch = f.read(1)
        if len(ch) != 1 or ord(ch) == 0:
            break
        pieces.append(ch)
    return "".join(pieces)
# Human-readable names for REL section indices (index 0 is a placeholder).
BLOCKNAMES = ["0","ASM","Init","Final","Constants","Objects","Other"]
class RelCommand:
    """
    One relocation command of a .rel module.

    Attributes read from the file (big-endian >HBBI): Inc (offset increment
    from the previous command), Command (relocation type), TargetBlockIndex
    and Operand (target offset).  Other attributes — Offset, Block, Index,
    TargetModuleID, Internal — are assigned externally by the code that owns
    the command list (not set in __init__); Index is presumably a command id
    used for #Target cross-references (verify against callers).
    """
    def __init__(self,f=0):
        # defaults for a command built in memory (not parsed from a file)
        self.Internal = 0
        self.File = 0
        self.Comment = ""
        if f:
            # parse an 8-byte relocation record from the open file
            data = struct.unpack(">HBBI",f.read(8))
            self.Inc = data[0]
            self.Command = data[1]
            self.TargetBlockIndex = data[2]
            self.Operand = data[3]
            self.File = 1
    def __str__(self):
        # Render in the textual form that readBlocks() later re-parses;
        # branch order matters (most specific case first).
        if self.Internal and self.Block != 1 and self.TargetBlockIndex == 1 and self.TargetModuleID == 0x1B:
            # special-case for module 0x1B: show the absolute RAM address
            return "sora_ASMRel @%08X ID%03X C%02X" % (self.Operand+0x8070aa14,self.Index,self.Command)
        elif self.Internal and self.Block != 1 and self.TargetBlockIndex == 1:
            return "ASMRel @%06X ID%03X C%02X" % (self.Offset,self.Index,self.Command)
        elif self.Internal and self.Block == 1 and self.TargetBlockIndex == 1:
            # inside the ASM block only the byte-within-word offset is shown
            return "ASMRel +%d ID%03X C%02X" % (self.Offset&3,self.Index,self.Command)
        elif self.Block == 1 and self.File:
            return "Rel +%d C%02X M%02X B%02X @%06X" % (self.Offset&3,self.Command,self.TargetModuleID,self.TargetBlockIndex,self.Operand)
        else:
            return "Rel @%06X C%02X M%02X B%02X @%06X" % (self.Offset,self.Command,self.TargetModuleID,self.TargetBlockIndex,self.Operand)
# On Windows, suppress the console window that the spawned toolchain
# processes (assembler/disassembler) would otherwise pop up.
startupinfo = None
if os.name == 'nt':
    startupinfo = subprocess.STARTUPINFO()
    startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
class RelBlock:
    """
    One section of a .rel module.

    Attributes (Index, Offset, Size, Flags, Data, RelFile back-reference)
    are assigned externally by the owning RelFile — not set here.
    """
    def RelAt(self,off):
        """Return the relocation of this block at offset ``off``, or None."""
        ptrRell = filter(lambda rel: off == rel.Offset and rel.Block == self.Index, self.RelFile.Rels)
        if len(ptrRell) == 0:
            return None
        return ptrRell[0]
    def strat(self,off):
        """Return the NUL-terminated string stored at ``off`` in this block's data."""
        strlen = 0
        while ord(self.Data[off+strlen]) != 0:
            strlen +=1
        return self.Data[off:off+strlen]
    def dumpData(self,asm=1):
        """
        Dump this block to working/<name>.raw; additionally disassemble the
        ASM block (index 1) to working/<name>.asm with relocations annotated
        as '#...' suffixes, or write other blocks' relocations to a .txt file.
        """
        if len(BLOCKNAMES) > self.Index:
            name = BLOCKNAMES[self.Index]
        else:
            name = "Unk%02d" % self.Index
        if self.Offset == 0:
            # block not present in the file
            return
        out = open("working/"+name+".raw","wb")
        out.write(self.Data)
        out.close()
        if self.Index == 1 and asm:
            #run vdappc on the raw output
            working = open("tmp.out","wb")
            subprocess.Popen("vdappc.exe working/"+name+".raw 0", stdout=working,stderr=subprocess.PIPE, startupinfo=startupinfo).communicate()
            working.close()
            working = open("tmp.out","rb")
            c = open("working/"+name+".asm","wb")
            working.seek(0)
            # relocations that live in this block, and relocations elsewhere
            # that point INTO this block of this module
            rels = filter(lambda rel: self.Index == rel.Block,self.RelFile.Rels)
            targetrels = filter(lambda rel: self.RelFile.FileID == rel.TargetModuleID and self.Index == rel.TargetBlockIndex,self.RelFile.Rels)
            # crude progress reporting every ~5% of the block
            target = float(self.Size/20)
            offset = 0
            RAWData = working.read().split("\n")
            processed = []
            for data in RAWData:
                if offset > target:
                    print float(offset)/self.Size,
                    target = target+self.Size/20
                #print hex(offset)
                data = data[20:-1]#drop address/bytes prefix and newline
                if data.find("0x") != -1:
                    # rewrite absolute hex operands as PC-relative values
                    tmpindex = data.find("0x")
                    staticoff = int(data[tmpindex:],16)
                    data = data[0:tmpindex] + hex(staticoff-offset)
                processed.append(data)
                offset = offset+4
            # annotate entry points (word-aligned offsets; one line per word)
            for thing in ["GetSrc","Init","Finalize"]:
                index = self.RelFile.__dict__[thing] & 0xFFFFFFFC
                processed[index/4] = processed[index/4]+"#"+thing
            for rel in rels:
                index = rel.Offset & 0xFFFFFFFC
                processed[index/4] = processed[index/4]+"#"+str(rel)
            for rel in targetrels:
                index = rel.Operand & 0xFFFFFFFC
                processed[index/4] = processed[index/4]+"#Target %03X"%rel.Index
                if rel.Comment != "":
                    processed[index/4] = "".join(["#\n#",rel.Comment," @", hex(index),"\n#\n",processed[index/4]])
            for s in processed:
                c.write(s)
                c.write("\n")
            c.close()
        elif self.Index != 1:
            # non-ASM block: just list its relocations, sorted by offset
            out = open("working/"+name+".txt","wb")
            for rel in sorted(self.RelFile.Rels, key=attrgetter('Offset')):
                if self.Index == rel.Block:
                    out.write(str(rel)+"\n")
    def __str__(self):
        global BLOCKNAMES
        if len(BLOCKNAMES) > self.Index:
            name = BLOCKNAMES[self.Index]
        else:
            name = "Unk%02d" % self.Index
        return "%10s - Flags %d, Start %X - Size %X - End %X"%(name,self.Flags,self.Offset,self.Size,self.Offset+self.Size)
    pass
def compileFile(filename):
output = subprocess.Popen(["powerpc-gekko-as.exe", "-mregnames", "-mgekko", "-o","tmp.o", filename], stdout=subprocess.PIPE,
stderr=subprocess.PIPE, startupinfo=startupinfo).communicate()
subprocess.call(["powerpc-gekko-objcopy.exe", "-O", "binary",
"tmp.o", filename.replace(".asm",".raw")], stderr=subprocess.PIPE, startupinfo=startupinfo)
print "Compiled",filename,"into ",filename.replace(".asm",".raw")
class RelFile:
def readBlocks(self):
    """
    Rebuild self.Rels (and the ASM block data) from the dumped working/
    files: recompiles working/ASM.asm, then re-parses the '#'-annotations
    (GetSrc/Init/Finalize markers, ASMRel/Rel commands, Target labels)
    produced by RelBlock.dumpData, plus each non-ASM block's .txt listing.
    """
    self.Rels = []
    newBlocks = []
    modulerefs = []
    fullrels = []
    # ASMRel commands are keyed by their id and resolved against the
    # '#Target <id>' labels collected while scanning the ASM listing
    asmrels = {}
    targets = {}
    for block in self.Blocks:
        if block.Index == 1:
            # the ASM block: reassemble and walk the annotated listing
            offset = 0
            compileFile("working/ASM.asm")
            tmp = open("working/ASM.raw","rb")
            block.Data = tmp.read()
            tmp.close()
            for line in open("working/ASM.asm","r"):
                line = line[0:-1]#strip newline
                # skip blank/comment/indented lines; they carry no opcode
                if len(line) == 0 or line[0] == "#" or line[0]== " ":
                    print line
                    continue
                # everything after each '#' is an annotation on this word
                data = line.split("#")
                for i in range(1,len(data)):
                    cur = data[i]
                    params = cur.split(" ")
                    if cur in ["GetSrc","Init","Finalize"]:
                        # entry-point marker: remember its offset
                        self.__dict__[cur] = offset
                    elif params[0] == "ASMRel":
                        # 'ASMRel +<byte> ID<id> C<cmd>' — operand filled in
                        # later from the matching '#Target <id>'
                        reldata = RelCommand()
                        reldata.Offset = offset+int(params[1][1])
                        reldata.Command = int(params[3][1:],16)
                        reldata.TargetModuleID = self.FileID
                        reldata.Block = block.Index
                        reldata.TargetBlockIndex = block.Index
                        asmrels[int(params[2][2:],16)] = reldata
                    elif params[0] == "Rel":
                        # fully-specified relocation: 'Rel +<byte> C.. M.. B.. @<operand>'
                        reldata = RelCommand()
                        reldata.Offset = offset+int(params[1][1])
                        reldata.Command = int(params[2][1:],16)
                        reldata.TargetModuleID = int(params[3][1:],16)
                        reldata.Block = block.Index
                        reldata.TargetBlockIndex = int(params[4][1:],16)
                        reldata.Operand = int(params[5][1:],16)
                        fullrels.append(reldata)
                    elif params[0] == "Target":
                        # relocation target label: record its offset by id
                        targets[int(params[1],16)] = offset
                    else:
                        print hex(offset),params
                # one 4-byte instruction per (non-skipped) line
                offset +=4
        else:
            if block.Offset == 0:
                # block absent from the module
                continue
            tmp = open("working/"+block.Name+".raw","rb")
            block.Data = tmp.read()
            tmp.close()
            # non-ASM blocks: relocations come from the .txt listing; here
            # offsets are absolute hex (contrast with the ASM '+<byte>' form)
            for line in open("working/"+block.Name+".txt","r"):
                line = line[0:-1]#strip newline
                params = line.split(" ")
                if params[0] == "ASMRel":
                    reldata = RelCommand()
                    reldata.Offset = int(params[1][1:],16)
                    reldata.Command = int(params[3][1:],16)
                    reldata.TargetModuleID = self.FileID
                    reldata.Block = block.Index
                    reldata.TargetBlockIndex = 1
                    asmrels[int(params[2][2:],16)] = reldata
                elif params[0] == "Rel":
                    reldata = RelCommand()
                    reldata.Offset = int(params[1][1:],16)
                    reldata.Command = int(params[2][1:],16)
                    reldata.TargetModuleID = int(params[3][1:],16)
                    reldata.Block = block.Index
                    reldata.TargetBlockIndex = int(params[4][1:],16)
                    reldata.Operand = int(params[5][1:],16)
                    fullrels.append(reldata)
                else:
                    print params
    #all blocks loaded, process rels
    for key,arel in asmrels.items():
        if key not in targets:
            raise Exception("NO TARGET FOUND FOR KEY %03X"%key)
        arel.Operand = targets[key]
        fullrels.append(arel)
    # stable sort: by TargetModuleID, then Block, then Offset — the order
    # the serialized relocation lists require
    truerelist = sorted(sorted(sorted(fullrels, key=attrgetter('Offset')), key=attrgetter('Block')), key=attrgetter('TargetModuleID'))
    self.Rels = truerelist
def toFile(self,filename):
f = open(filename,"wb")
f.seek(0x4C)
curoff = 0x4C+self.BlockCount*8
for block in self.Blocks:
if block.Offset != 0:
f.write(struct.pack(">I",curoff|block.Flags))
print block.Index,block.Offset-curoff,len(block.Data)
else:
f.write(struct.pack(">I",0|block.Flags))
f.write(struct.pack(">I",len(block.Data)))
if block.Offset != 0:
curoff += len(block.Data)
if block.Index == 4:
curoff += 12
for block in self.Blocks:
if block.Offset != 0:
f.write(block.Data)
if block.Index == 4:
f.seek(12,1)
#Build Proper RelLists (per referenced module
RelLists = {}
currentMod = self.Rels[0].TargetModuleID
currentBlock = -1
previousOff = 0
for rel in self.Rels:
if rel.TargetModuleID != currentMod:
RelLists[currentMod] += struct.pack(">HBBI",0,0xCB,0,0)
currentMod = rel.TargetModuleID
print "CB COMMAND",hex(currentMod)
currentBlock = -1
if rel.Block != currentBlock:
if currentMod not in RelLists:
RelLists[currentMod] = ""
RelLists[currentMod] += struct.pack(">HBBI",0,0xCA,rel.Block,0)
currentBlock = rel.Block
print "CA COMMAND",hex(currentBlock)
previousOff = 0
previousOff = 0
#print rel
RelLists[currentMod] += struct.pack(">HBBI",rel.Offset-previousOff,rel.Command,rel.TargetBlockIndex,rel.Operand)
previousOff = rel.Offset
#end the last list
RelLists[currentMod] += struct.pack(">HBBI",0,0xCB,0,0)
#writeRelLists
curoff = f.tell()+len(RelLists)*8
#proper order
modlist = []
for k in RelLists.keys():
if k == 0:
continue
else:
modlist.append(k)
modlist = sorted(modlist)
modlist.append(0)
self.RelList = f.tell()
for k in modlist:
print "writing relist for module %02X at %X" % (k,curoff)
if k == self.FileID:
self.RelDataSelf == curoff
f.write(struct.pack(">I",k))
f.write(struct.pack(">I",curoff))
curoff += len(RelLists[k])
self.RelData = f.tell()
for k in modlist:
f.write(RelLists[k])
f.seek(0)
f.write(struct.pack(">I",self.FileID))
f.write(struct.pack(">I",self.PrevEntry))
f.write(struct.pack(">I",self.NextEntry))
f.write(struct.pack(">I",self.BlockCount))
f.write(struct.pack(">I",self.BlockTable))
f.write(struct.pack(">I",self.NameOffset))
f.write(struct.pack(">I",self.NameSize))
f.write(struct.pack(">I",self.Version))
f.write(struct.pack(">I",self.BSSSize))
f.write(struct.pack(">I",self.RelData))
f.write(struct.pack(">I",self.RelList))
f.write(struct.pack(">I",self.RelListSize))
f.write(struct.pack(">B",self.ConstructorIndex))
f.write(struct.pack(">B",self.DestructorIndex))
f.write(struct.pack(">B",self.GetSrcIndex))
f.write(struct.pack(">B",self.Last))
f.write(struct.pack(">I",self.Init))
f.write(struct.pack(">I",self.Finalize))
f.write(struct.pack(">I",self.GetSrc))
f.write(struct.pack(">I",self.Align))
f.write(struct.pack(">I",self.BSSAlign))
f.write(struct.pack(">I",self.RelDataSelf))
f.close()
def __init__(self,filename):
    """Load a .rel module file: fixed header, block table, block data,
    then decode the per-module relocation command streams into
    self.Rels (a flat list of RelCommand objects)."""
    global CUR_REL
    f = open(filename,"rb")
    # --- 0x4C-byte big-endian header ---------------------------------
    self.FileID = struct.unpack(">I",f.read(4))[0]
    self.PrevEntry = struct.unpack(">I",f.read(4))[0]
    self.NextEntry = struct.unpack(">I",f.read(4))[0]
    self.BlockCount = struct.unpack(">I",f.read(4))[0]
    self.BlockTable = struct.unpack(">I",f.read(4))[0]
    self.NameOffset = struct.unpack(">I",f.read(4))[0]
    self.NameSize = struct.unpack(">I",f.read(4))[0]
    self.Version = struct.unpack(">I",f.read(4))[0]
    self.BSSSize = struct.unpack(">I",f.read(4))[0]
    self.RelData = struct.unpack(">I",f.read(4))[0]
    self.RelList = struct.unpack(">I",f.read(4))[0]
    self.RelListSize = struct.unpack(">I",f.read(4))[0]
    self.ConstructorIndex = struct.unpack(">B",f.read(1))[0]
    self.DestructorIndex = struct.unpack(">B",f.read(1))[0]
    self.GetSrcIndex = struct.unpack(">B",f.read(1))[0]
    self.Last = struct.unpack(">B",f.read(1))[0]
    self.Init = struct.unpack(">I",f.read(4))[0]
    self.Finalize = struct.unpack(">I",f.read(4))[0]
    self.GetSrc = struct.unpack(">I",f.read(4))[0]
    self.Align = struct.unpack(">I",f.read(4))[0]
    self.BSSAlign = struct.unpack(">I",f.read(4))[0]
    self.RelDataSelf = struct.unpack(">I",f.read(4))[0]
    self.Blocks = []
    # debug dump of every header field just parsed
    for i,v in self.__dict__.items():
        print i,v
    print hex(f.tell())
    # --- block table: BlockCount entries of (offset|flags, size) -----
    f.seek(self.BlockTable)
    for i in range(0,self.BlockCount):
        f.seek(self.BlockTable+i*8)
        relblock = RelBlock()
        relblock.RelFile = self
        relblock.Index = i
        if len(BLOCKNAMES) > relblock.Index:
            relblock.Name = BLOCKNAMES[relblock.Index]
        else:
            relblock.Name = "Unk%02d" % relblock.Index
        tmp = struct.unpack(">I",f.read(4))[0]
        # low 2 bits of the offset word are flag bits, not offset
        relblock.Offset = tmp & ~0x03
        relblock.Comments = {}
        relblock.Size = struct.unpack(">I",f.read(4))[0]
        relblock.Flags = tmp & 0x03
        f.seek(relblock.Offset)
        relblock.Data = f.read(relblock.Size)
        print relblock
        self.Blocks.append(relblock)
    # --- rel lists: one (module id, offset) table entry per module ---
    self.Rels = []
    index = 0
    for i in range(0,self.RelListSize/8):
        f.seek(self.RelList+i*8)
        ID = struct.unpack(">I",f.read(4))[0]
        FileOffset = struct.unpack(">I",f.read(4))[0]
        print "Reading RelList->%02X @%X" % (ID,FileOffset)
        f.seek(FileOffset)
        cmd = RelCommand(f)
        # offsets are delta-encoded; 0xCA switches target block and
        # resets the running offset, 0xCB terminates the stream.
        while cmd.Command != 0xCB:#CB Command ends a list
            if cmd.Command == 0xCA:#CA Command switches block
                offset = 0
                curBlock = cmd.TargetBlockIndex
            else:
                # NOTE(review): assumes every stream starts with a 0xCA
                # command — otherwise offset/curBlock are unbound here.
                offset += cmd.Inc
                cmd.Offset = offset
                cmd.TargetModuleID = ID
                if self.FileID == ID:
                    cmd.Internal = 1
                cmd.Index = index
                index = index+1
                cmd.Block = curBlock
                # blocks 2/3 hold init/final function pointer tables
                if curBlock == 2:
                    cmd.Comment = "InitBlock[%02d]" % (cmd.Offset/4)
                if curBlock == 3:
                    cmd.Comment = "FinalBlock[%02d]" % (cmd.Offset/4)
                #print cmd
                self.Rels.append(cmd)
            cmd = RelCommand(f)
def dumpBlocks(self):
for block in self.Blocks:
print "Dumping...",block
block.dumpData()
def dumpFunctions(self):
    """Walk virtual-function tables referenced from block 5 and write a
    human-readable listing to working/funcdata.txt, tagging each
    function rel with a "Class:Base[slot]" comment."""
    log = open("working/funcdata.txt","w")
    # block 5 holds the object/class metadata this pass decodes
    objblock = self.Blocks[5]
    # rels of command 1, internal to this module, block5 -> block5
    objrels = filter(lambda rel: rel.Command == 1 and self.FileID == rel.TargetModuleID and 5 == rel.TargetBlockIndex and rel.Block == 5,self.Rels)
    it = iter(self.Rels)          # NOTE(review): unused
    commandindex = 0              # NOTE(review): unused
    Types = {}   # class name -> {scope offset -> base class name}
    inher = []   # rels already consumed as inheritance links; skipped below
    ic = 0
    for rel in objrels:
        ic = ic + 1
        if rel in inher:
            continue
        print ic,len(objrels)
        ptrblock = objblock.RelAt(rel.Operand)
        if ptrblock == None:
            continue
        # class name string pointed to by the descriptor
        s = objblock.strat(ptrblock.Operand)
        if len(s) == 0:
            continue
        SCOPE = struct.unpack_from(">i",objblock.Data,rel.Offset+4)[0]
        #iterate through inheritances
        if s not in Types:
            Types[s] = {}
            Types[s][0] = "null"
        inheritptr = objblock.RelAt(ptrblock.Offset+4)
        inherit = None
        if inheritptr != None:
            inherit = objblock.RelAt(inheritptr.Operand)
        #this points to a rel that points to a declaration
        bs = "unk"
        # follow the inheritance chain, recording base name per scope
        while inherit != None:
            inher.append(inherit)
            inheractual = objblock.RelAt(inherit.Operand)#this points to the declaration
            TARGET_SCOPE = struct.unpack_from(">i",objblock.Data,inherit.Offset+4)[0]
            Types[s][TARGET_SCOPE] = objblock.strat(inheractual.Operand)
            inherit = objblock.RelAt(inherit.Offset+8)
        #print sorted(Types[s].keys())
        # negative scope selects which recorded base this vtable is for
        if -SCOPE in Types[s]:
            bs = Types[s][-SCOPE]
        else:
            bs = "null"
        func = objblock.RelAt(rel.Offset+8)
        i = 0
        log.write(hex(rel.Offset)+" ")
        log.write(s+"->"+bs)
        log.write("#"+str(SCOPE/4)+"\n")
        # enumerate vtable slots until the chain leaves block 5
        while func != None and func.TargetBlockIndex != 5:
            log.write("\t[%02d] %s \n"%(i,func))
            log.flush()
            func.Comment = "%s:%s[%d]" % (s,bs,i)
            i += 1
            func = objblock.RelAt(func.Offset+4)
    log.close()
def dumpObjects(self):
    """Dump class/object declarations referenced from block 5 to
    working/classdata.txt: each class name followed by the names of
    the entries in its member table."""
    log = open("working/classdata.txt","w")
    objblock = self.Blocks[5]
    # NOTE(review): objrels is computed but never used — the loop
    # below re-filters while iterating `it` so it can pull the
    # companion pointer rel with it.next().
    objrels = filter(lambda rel: self.FileID == rel.TargetModuleID and 5 == rel.TargetBlockIndex and rel.Block != 5,self.Rels)
    it = iter(self.Rels)
    commandindex = 0
    for rel in it:
        # only command-1 internal rels originating in block 5
        if rel.Command != 1 or self.FileID != rel.TargetModuleID or 5 != rel.Block:
            continue
        commandindex = 0
        if 5 != rel.TargetBlockIndex:
            continue
        s = objblock.strat(rel.Operand)
        if len(s) == 0:
            continue
        log.write(str(rel)+"\n")
        log.write(s+"\n")
        # the very next rel in stream order points at the member table
        ptrrel = it.next()
        curOff = ptrrel.Operand
        startOff = rel.Offset
        # walk 8-byte member entries up to the declaring rel's offset
        while curOff < startOff:
            curRel = objblock.RelAt(curOff)
            curOff += 8
            if curRel == None:
                log.write("No rel@%X\n"%curOff)
                break
            curRel = objblock.RelAt(curRel.Operand)
            #print curRel
            if curRel == None:
                continue
            log.write("-"+objblock.strat(curRel.Operand)+"\n")
        log.write("\n")
    log.close()
def somescriptfunc():
    """Ad-hoc driver: load sora_melee.rel, dump its function tables and
    raw blocks, then round-trip it back out as out.rel.
    Input path is a hard-coded local Windows path."""
    rel = RelFile("M:/ProjectM/extracted_ntsc/module/sora_melee.rel")
    #rel.toFile("orig.rel")
    rel.dumpFunctions()
    rel.dumpBlocks()
    #rel.dumpObjects()
    #rel.readBlocks()
    rel.toFile("out.rel")
    #compileFile("working/ASM.asm")
if __name__ == "__main__":
    # Script entry point: run the ad-hoc driver directly.
    # cProfile is imported so the profiling line below can be
    # re-enabled without editing imports.
    import cProfile
    #cProfile.run('somescriptfunc()')
    somescriptfunc()
    #sys.stdout = open("log.txt","wb")
| |
#!/usr/bin/env python
# APM automatic test suite
# Andrew Tridgell, October 2011
import pexpect, os, sys, shutil, atexit
import optparse, fnmatch, time, glob, traceback, signal
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pysim'))
import util
os.environ['PYTHONUNBUFFERED'] = '1'
os.putenv('TMPDIR', util.reltopdir('tmp'))
def get_default_params(atype):
    '''Extract the default parameter set for vehicle type *atype* by
    booting SITL + MAVProxy, waiting for the parameter save message,
    and copying the saved file to buildlogs/<atype>-defaults.parm.
    Returns True on success (pexpect raises on timeout otherwise).'''
    # use rover simulator so SITL is not starved of input
    from pymavlink import mavutil
    HOME=mavutil.location(40.071374969556928,-105.22978898137808,1583.702759,246)
    sim_cmd = util.reltopdir('Tools/autotest/pysim/sim_wrapper.py') + ' --frame=rover --rate=200 --speedup=100 --home=%f,%f,%u,%u' % (
        HOME.lat, HOME.lng, HOME.alt, HOME.heading)
    runsim = pexpect.spawn(sim_cmd, logfile=sys.stdout, timeout=10)
    runsim.delaybeforesend = 0
    runsim.expect('Starting at lat')
    # wipe=True erases the simulated eeprom so defaults are regenerated
    sil = util.start_SIL(atype, wipe=True)
    mavproxy = util.start_MAVProxy_SIL(atype)
    print("Dumping defaults")
    idx = mavproxy.expect(['Please Run Setup', 'Saved [0-9]+ parameters to (\S+)'])
    if idx == 0:
        # we need to restart it after eeprom erase
        util.pexpect_close(mavproxy)
        util.pexpect_close(sil)
        sil = util.start_SIL(atype)
        mavproxy = util.start_MAVProxy_SIL(atype)
        idx = mavproxy.expect('Saved [0-9]+ parameters to (\S+)')
    # group(1) is the path SITL saved the parameter file to
    parmfile = mavproxy.match.group(1)
    dest = 'buildlogs/%s-defaults.parm' % atype
    shutil.copy(parmfile, dest)
    util.pexpect_close(mavproxy)
    util.pexpect_close(sil)
    util.pexpect_close(runsim)
    print("Saved defaults for %s to %s" % (atype, dest))
    return True
def build_all():
    '''Run the build_all.sh build script; return True on success.'''
    print("Running build_all.sh")
    script = util.reltopdir('Tools/scripts/build_all.sh')
    ok = util.run_cmd(script, dir=util.reltopdir('.')) == 0
    if not ok:
        print("Failed build_all.sh")
    return ok
def build_binaries():
    '''Run a copy of build_binaries.sh; return True on success.'''
    print("Running build_binaries.sh")
    import shutil
    # run a copy of the script: it switches git branches, which could
    # rewrite the original script file while it is executing
    source = util.reltopdir('Tools/scripts/build_binaries.sh')
    working = util.reltopdir('./build_binaries.sh')
    shutil.copyfile(source, working)
    shutil.copymode(source, working)
    ok = util.run_cmd(working, dir=util.reltopdir('.')) == 0
    if not ok:
        print("Failed build_binaries.sh")
    return ok
def build_devrelease():
    '''Run a copy of build_devrelease.sh; return True on success.'''
    print("Running build_devrelease.sh")
    import shutil
    # run a copy of the script: it switches git branches, which could
    # rewrite the original script file while it is executing
    source = util.reltopdir('Tools/scripts/build_devrelease.sh')
    working = util.reltopdir('./build_devrelease.sh')
    shutil.copyfile(source, working)
    shutil.copymode(source, working)
    ok = util.run_cmd(working, dir=util.reltopdir('.')) == 0
    if not ok:
        print("Failed build_devrelease.sh")
    return ok
def build_examples():
    '''Run the build_examples.sh script; return True on success.'''
    print("Running build_examples.sh")
    script = util.reltopdir('Tools/scripts/build_examples.sh')
    ok = util.run_cmd(script, dir=util.reltopdir('.')) == 0
    if not ok:
        print("Failed build_examples.sh")
    return ok
def build_parameters():
    '''Regenerate parameter metadata via param_parse.py; True on success.'''
    print("Running param_parse.py")
    script = util.reltopdir('Tools/autotest/param_metadata/param_parse.py')
    ok = util.run_cmd(script, dir=util.reltopdir('.')) == 0
    if not ok:
        print("Failed param_parse.py")
    return ok
def convert_gpx():
    '''Convert every buildlogs/*.tlog to GPX/KML, zip the KML into a
    kmz, and render a PNG flight track image. Always returns True.'''
    import glob
    for tlog in glob.glob("buildlogs/*.tlog"):
        util.run_cmd(util.reltopdir("../mavlink/pymavlink/tools/mavtogpx.py") + " --nofixcheck " + tlog)
        gpx_file = tlog + '.gpx'
        kml_file = tlog + '.kml'
        # gpsbabel/zip may be absent; don't fail the whole run on them
        util.run_cmd('gpsbabel -i gpx -f %s -o kml,units=m,floating=1,extrude=1 -F %s' % (gpx_file, kml_file), checkfail=False)
        util.run_cmd('zip %s.kmz %s.kml' % (tlog, tlog), checkfail=False)
        util.run_cmd("mavflightview.py --imagefile=%s.png %s" % (tlog, tlog))
    return True
def test_prerequisites():
    '''Ensure the directories needed by the test run exist.
    Currently only creates buildlogs/; always returns True.'''
    print("Testing prerequisites")
    util.mkdir_p('buildlogs')
    return True
def alarm_handler(signum, frame):
    '''SIGALRM handler: the whole run exceeded --timeout seconds.
    Records a TIMEOUT result, writes out what we have, then kills the
    entire process group (including child simulators) and exits 1.'''
    global results, opts
    try:
        results.add('TIMEOUT', '<span class="failed-text">FAILED</span>', opts.timeout)
        util.pexpect_close_all()
        convert_gpx()
        write_fullresults()
        # SIGKILL to pgid 0 takes down every child we spawned
        os.killpg(0, signal.SIGKILL)
    except Exception:
        # best-effort cleanup: never let the handler itself raise
        pass
    sys.exit(1)
############## main program #############
# Command-line options for the autotest driver.
parser = optparse.OptionParser("autotest")
parser.add_option("--skip", type='string', default='', help='list of steps to skip (comma separated)')
parser.add_option("--list", action='store_true', default=False, help='list the available steps')
parser.add_option("--viewerip", default=None, help='IP address to send MAVLink and fg packets to')
parser.add_option("--map", action='store_true', default=False, help='show map')
parser.add_option("--experimental", default=False, action='store_true', help='enable experimental tests')
parser.add_option("--timeout", default=3000, type='int', help='maximum runtime in seconds')

opts, args = parser.parse_args()

# per-vehicle test modules (live alongside this script)
import arducopter, arduplane, apmrover2

# Ordered list of all known test steps; run_step() dispatches on these
# names and positional args may narrow the list via wildcards below.
steps = [
    'prerequisites',
    'build.All',
    'build.Binaries',
    'build.DevRelease',
    'build.Examples',
    'build.Parameters',
    'build2560.ArduPlane',
    'build.ArduPlane',
    'defaults.ArduPlane',
    'fly.ArduPlane',
    'build2560.APMrover2',
    'build.APMrover2',
    'defaults.APMrover2',
    'drive.APMrover2',
    'build.ArduCopter',
    'defaults.ArduCopter',
    'fly.ArduCopter',
    'fly.CopterAVC',
    'convertgpx',
]

# patterns from --skip, matched case-insensitively by skip_step()
skipsteps = opts.skip.split(',')

# ensure we catch timeouts
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(opts.timeout)

if opts.list:
    # --list: just print the step names and exit
    for step in steps:
        print(step)
    sys.exit(0)
def skip_step(step):
    '''Return True when *step* matches any --skip pattern (fnmatch,
    case-insensitive).'''
    lowered = step.lower()
    return any(fnmatch.fnmatch(lowered, pattern.lower())
               for pattern in skipsteps)
def run_step(step):
    '''Dispatch one named test step and return its boolean result.
    Raises RuntimeError for an unknown step name.'''
    # remove old logs
    util.run_cmd('/bin/rm -f logs/*.BIN logs/LASTLOG.TXT')
    if step == "prerequisites":
        return test_prerequisites()
    # SITL builds
    if step == 'build.ArduPlane':
        return util.build_SIL('ArduPlane')
    if step == 'build.APMrover2':
        return util.build_SIL('APMrover2')
    if step == 'build.ArduCopter':
        return util.build_SIL('ArduCopter')
    # AVR (mega2560) builds
    if step == 'build2560.ArduPlane':
        return util.build_AVR('ArduPlane', board='mega2560')
    if step == 'build2560.APMrover2':
        return util.build_AVR('APMrover2', board='mega2560')
    # default parameter extraction
    if step == 'defaults.ArduPlane':
        return get_default_params('ArduPlane')
    if step == 'defaults.ArduCopter':
        return get_default_params('ArduCopter')
    if step == 'defaults.APMrover2':
        return get_default_params('APMrover2')
    # flight/drive tests in the per-vehicle modules
    if step == 'fly.ArduCopter':
        return arducopter.fly_ArduCopter(viewerip=opts.viewerip, map=opts.map)
    if step == 'fly.CopterAVC':
        return arducopter.fly_CopterAVC(viewerip=opts.viewerip, map=opts.map)
    if step == 'fly.ArduPlane':
        return arduplane.fly_ArduPlane(viewerip=opts.viewerip, map=opts.map)
    if step == 'drive.APMrover2':
        return apmrover2.drive_APMrover2(viewerip=opts.viewerip, map=opts.map)
    # repo-wide build scripts
    if step == 'build.All':
        return build_all()
    if step == 'build.Binaries':
        return build_binaries()
    if step == 'build.DevRelease':
        return build_devrelease()
    if step == 'build.Examples':
        return build_examples()
    if step == 'build.Parameters':
        return build_parameters()
    if step == 'convertgpx':
        return convert_gpx()
    raise RuntimeError("Unknown step %s" % step)
class TestResult(object):
    '''Outcome record for a single test step.'''
    def __init__(self, name, result, elapsed):
        self.name = name        # step name, e.g. 'fly.ArduPlane'
        self.result = result    # HTML fragment describing pass/fail
        # wall-clock seconds, pre-formatted to one decimal place
        self.elapsed = "%.1f" % elapsed
class TestFile(object):
    '''A named artifact file attached to the results page.'''
    def __init__(self, name, fname):
        self.name = name    # human-readable label
        self.fname = fname  # file name relative to buildlogs/
class TestResults(object):
    '''Accumulator for the whole run: step outcomes, artifact files and
    images, later rendered into the web results page.'''
    def __init__(self):
        self.date = time.asctime()
        # git commit being tested; shells out to git at construction
        self.githash = util.run_cmd('git rev-parse HEAD', output=True, dir=util.reltopdir('.')).strip()
        self.tests = []    # list of TestResult
        self.files = []    # list of TestFile artifacts
        self.images = []   # list of TestFile images
    def add(self, name, result, elapsed):
        '''add a result'''
        self.tests.append(TestResult(name, result, elapsed))
    def addfile(self, name, fname):
        '''add a result file'''
        self.files.append(TestFile(name, fname))
    def addimage(self, name, fname):
        '''add a result image'''
        self.images.append(TestFile(name, fname))
    def addglob(self, name, pattern):
        '''add all files under buildlogs/ matching *pattern*'''
        import glob
        for f in glob.glob('buildlogs/%s' % pattern):
            self.addfile(name, os.path.basename(f))
    def addglobimage(self, name, pattern):
        '''add all images under buildlogs/ matching *pattern*'''
        import glob
        for f in glob.glob('buildlogs/%s' % pattern):
            self.addimage(name, os.path.basename(f))
def write_webresults(results):
    '''Render the HTML templates in Tools/autotest/web with *results*
    and copy the page assets into buildlogs/.'''
    from pymavlink.generator import mavtemplate
    t = mavtemplate.MAVTemplate()
    for h in glob.glob(util.reltopdir('Tools/autotest/web/*.html')):
        html = util.loadfile(h)
        f = open("buildlogs/%s" % os.path.basename(h), mode='w')
        # substitute template variables from the results object
        t.write(f, html, results)
        f.close()
    # static images used by the pages
    for f in glob.glob(util.reltopdir('Tools/autotest/web/*.png')):
        shutil.copy(f, 'buildlogs/%s' % os.path.basename(f))
def write_fullresults():
    '''Attach every known artifact (logs, parameter files, cores,
    docs, track images) to the global results object and render the
    web results page. Missing files are simply not matched by glob.'''
    global results
    results.addglob("Google Earth track", '*.kmz')
    results.addfile('Full Logs', 'autotest-output.txt')
    results.addglob('DataFlash Log', '*-log.bin')
    results.addglob("MAVLink log", '*.tlog')
    results.addglob("GPX track", '*.gpx')
    # ArduPlane artifacts
    results.addfile('ArduPlane build log', 'ArduPlane.txt')
    results.addfile('ArduPlane code size', 'ArduPlane.sizes.txt')
    results.addfile('ArduPlane stack sizes', 'ArduPlane.framesizes.txt')
    results.addfile('ArduPlane defaults', 'ArduPlane-defaults.parm')
    results.addglob("ArduPlane log", 'ArduPlane-*.BIN')
    results.addglob("ArduPlane core", 'ArduPlane.core')
    results.addglob("ArduPlane ELF", 'ArduPlane.elf')
    # ArduCopter artifacts
    results.addfile('ArduCopter build log', 'ArduCopter.txt')
    results.addfile('ArduCopter code size', 'ArduCopter.sizes.txt')
    results.addfile('ArduCopter stack sizes', 'ArduCopter.framesizes.txt')
    results.addfile('ArduCopter defaults', 'ArduCopter-defaults.parm')
    results.addglob("ArduCopter log", 'ArduCopter-*.BIN')
    results.addglob("ArduCopter core", 'ArduCopter.core')
    results.addglob("ArduCopter elf", 'ArduCopter.elf')
    results.addglob("CopterAVC log", 'CopterAVC-*.BIN')
    results.addglob("CopterAVC core", 'CopterAVC.core')
    # APMrover2 artifacts
    results.addfile('APMrover2 build log', 'APMrover2.txt')
    results.addfile('APMrover2 code size', 'APMrover2.sizes.txt')
    results.addfile('APMrover2 stack sizes', 'APMrover2.framesizes.txt')
    results.addfile('APMrover2 defaults', 'APMrover2-defaults.parm')
    results.addglob("APMrover2 log", 'APMrover2-*.BIN')
    results.addglob("APMrover2 core", 'APMrover2.core')
    results.addglob("APMrover2 ELF", 'APMrover2.elf')
    # generated documentation
    results.addglob('APM:Libraries documentation', 'docs/libraries/index.html')
    results.addglob('APM:Plane documentation', 'docs/ArduPlane/index.html')
    results.addglob('APM:Copter documentation', 'docs/ArduCopter/index.html')
    results.addglob('APM:Rover documentation', 'docs/APMrover2/index.html')
    results.addglobimage("Flight Track", '*.png')
    write_webresults(results)
# global accumulator shared by run_tests(), alarm_handler() and
# write_fullresults()
results = TestResults()
def check_logs(step):
    '''After a fly.*/drive.* step, move its DataFlash logs, any core
    dump and the SIL ELF binaries into buildlogs/ for the results
    page. Other steps are ignored.'''
    print("check step: ", step)
    if step.startswith('fly.'):
        vehicle = step[4:]
    elif step.startswith('drive.'):
        vehicle = step[6:]
    else:
        return
    logs = glob.glob("logs/*.BIN")
    for log in logs:
        bname = os.path.basename(log)
        newname = "buildlogs/%s-%s" % (vehicle, bname)
        print("Renaming %s to %s" % (log, newname))
        os.rename(log, newname)
    # preserve a crash core dump under the vehicle's name, if any
    corefile = "core"
    if os.path.exists(corefile):
        newname = "buildlogs/%s.core" % vehicle
        print("Renaming %s to %s" % (corefile, newname))
        os.rename(corefile, newname)
    # copy the SIL ELF binaries alongside the logs
    util.run_cmd('/bin/cp A*/A*.elf ../buildlogs', dir=util.reltopdir('.'))
def run_tests(steps):
    '''Run every step in *steps* (honouring --skip), recording each
    outcome in the global results object. Returns True only if every
    executed step passed.'''
    global results
    passed = True
    failed = []
    for step in steps:
        # kill any pexpect children left over from the previous step
        util.pexpect_close_all()
        if skip_step(step):
            continue
        t1 = time.time()
        print(">>>> RUNNING STEP: %s at %s" % (step, time.asctime()))
        try:
            if not run_step(step):
                # step ran but reported failure
                print(">>>> FAILED STEP: %s at %s" % (step, time.asctime()))
                passed = False
                failed.append(step)
                results.add(step, '<span class="failed-text">FAILED</span>', time.time() - t1)
                continue
        except Exception, msg:
            # step raised: record failure but keep running later steps
            passed = False
            failed.append(step)
            print(">>>> FAILED STEP: %s at %s (%s)" % (step, time.asctime(), msg))
            traceback.print_exc(file=sys.stdout)
            results.add(step, '<span class="failed-text">FAILED</span>', time.time() - t1)
            check_logs(step)
            continue
        results.add(step, '<span class="passed-text">PASSED</span>', time.time() - t1)
        print(">>>> PASSED STEP: %s at %s" % (step, time.asctime()))
        check_logs(step)
    if not passed:
        print("FAILED %u tests: %s" % (len(failed), failed))
    util.pexpect_close_all()
    write_fullresults()
    return passed
util.mkdir_p('buildlogs')

# single-instance guard: only one autotest run per checkout
lck = util.lock_file('buildlogs/autotest.lck')
if lck is None:
    print("autotest is locked - exiting")
    sys.exit(0)

# whatever happens, close all pexpect children on exit
atexit.register(util.pexpect_close_all)

if len(args) > 0:
    # allow a wildcard list of steps
    matched = []
    for a in args:
        for s in steps:
            if fnmatch.fnmatch(s.lower(), a.lower()):
                matched.append(s)
    steps = matched

try:
    if not run_tests(steps):
        sys.exit(1)
except KeyboardInterrupt:
    util.pexpect_close_all()
    sys.exit(1)
except Exception:
    # make sure we kill off any children
    util.pexpect_close_all()
    raise
| |
# Copyright 2013 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from webob import exc
import webtest
from neutron.extensions import firewall
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron.tests import base
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_api_v2_extension
_uuid = uuidutils.generate_uuid
_get_path = test_api_v2._get_path
class FirewallExtensionTestCase(test_api_v2_extension.ExtensionTestCase):
fmt = 'json'
def setUp(self):
super(FirewallExtensionTestCase, self).setUp()
plural_mappings = {'firewall_policy': 'firewall_policies'}
self._setUpExtension(
'neutron.extensions.firewall.FirewallPluginBase',
constants.FIREWALL, firewall.RESOURCE_ATTRIBUTE_MAP,
firewall.Firewall, 'fw', plural_mappings=plural_mappings)
def test_create_firewall(self):
fw_id = _uuid()
data = {'firewall': {'description': 'descr_firewall1',
'name': 'firewall1',
'admin_state_up': True,
'firewall_policy_id': _uuid(),
'shared': False,
'tenant_id': _uuid()}}
return_value = copy.copy(data['firewall'])
return_value.update({'id': fw_id})
# since 'shared' is hidden
del return_value['shared']
instance = self.plugin.return_value
instance.create_firewall.return_value = return_value
res = self.api.post(_get_path('fw/firewalls', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_firewall.assert_called_with(mock.ANY,
firewall=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('firewall', res)
self.assertEqual(res['firewall'], return_value)
def test_firewall_list(self):
fw_id = _uuid()
return_value = [{'tenant_id': _uuid(),
'id': fw_id}]
instance = self.plugin.return_value
instance.get_firewalls.return_value = return_value
res = self.api.get(_get_path('fw/firewalls', fmt=self.fmt))
instance.get_firewalls.assert_called_with(mock.ANY,
fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
def test_firewall_get(self):
fw_id = _uuid()
return_value = {'tenant_id': _uuid(),
'id': fw_id}
instance = self.plugin.return_value
instance.get_firewall.return_value = return_value
res = self.api.get(_get_path('fw/firewalls',
id=fw_id, fmt=self.fmt))
instance.get_firewall.assert_called_with(mock.ANY,
fw_id,
fields=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('firewall', res)
self.assertEqual(res['firewall'], return_value)
def test_firewall_update(self):
fw_id = _uuid()
update_data = {'firewall': {'name': 'new_name'}}
return_value = {'tenant_id': _uuid(),
'id': fw_id}
instance = self.plugin.return_value
instance.update_firewall.return_value = return_value
res = self.api.put(_get_path('fw/firewalls', id=fw_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_firewall.assert_called_with(mock.ANY, fw_id,
firewall=update_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('firewall', res)
self.assertEqual(res['firewall'], return_value)
def test_firewall_delete(self):
self._test_entity_delete('firewall')
def _test_create_firewall_rule(self, src_port, dst_port):
rule_id = _uuid()
data = {'firewall_rule': {'description': 'descr_firewall_rule1',
'name': 'rule1',
'shared': False,
'protocol': 'tcp',
'ip_version': 4,
'source_ip_address': '192.168.0.1',
'destination_ip_address': '127.0.0.1',
'source_port': src_port,
'destination_port': dst_port,
'action': 'allow',
'enabled': True,
'tenant_id': _uuid()}}
expected_ret_val = copy.copy(data['firewall_rule'])
expected_ret_val['source_port'] = str(src_port)
expected_ret_val['destination_port'] = str(dst_port)
expected_call_args = copy.copy(expected_ret_val)
expected_ret_val['id'] = rule_id
instance = self.plugin.return_value
instance.create_firewall_rule.return_value = expected_ret_val
res = self.api.post(_get_path('fw/firewall_rules', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_firewall_rule.assert_called_with(
mock.ANY,
firewall_rule={'firewall_rule': expected_call_args})
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('firewall_rule', res)
self.assertEqual(res['firewall_rule'], expected_ret_val)
def test_create_firewall_rule_with_integer_ports(self):
self._test_create_firewall_rule(1, 10)
def test_create_firewall_rule_with_string_ports(self):
self._test_create_firewall_rule('1', '10')
def test_create_firewall_rule_with_port_range(self):
self._test_create_firewall_rule('1:20', '30:40')
def test_firewall_rule_list(self):
rule_id = _uuid()
return_value = [{'tenant_id': _uuid(),
'id': rule_id}]
instance = self.plugin.return_value
instance.get_firewall_rules.return_value = return_value
res = self.api.get(_get_path('fw/firewall_rules', fmt=self.fmt))
instance.get_firewall_rules.assert_called_with(mock.ANY,
fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
def test_firewall_rule_get(self):
rule_id = _uuid()
return_value = {'tenant_id': _uuid(),
'id': rule_id}
instance = self.plugin.return_value
instance.get_firewall_rule.return_value = return_value
res = self.api.get(_get_path('fw/firewall_rules',
id=rule_id, fmt=self.fmt))
instance.get_firewall_rule.assert_called_with(mock.ANY,
rule_id,
fields=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('firewall_rule', res)
self.assertEqual(res['firewall_rule'], return_value)
def test_firewall_rule_update(self):
rule_id = _uuid()
update_data = {'firewall_rule': {'action': 'deny'}}
return_value = {'tenant_id': _uuid(),
'id': rule_id}
instance = self.plugin.return_value
instance.update_firewall_rule.return_value = return_value
res = self.api.put(_get_path('fw/firewall_rules', id=rule_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_firewall_rule.assert_called_with(
mock.ANY,
rule_id,
firewall_rule=update_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('firewall_rule', res)
self.assertEqual(res['firewall_rule'], return_value)
def test_firewall_rule_delete(self):
self._test_entity_delete('firewall_rule')
def test_create_firewall_policy(self):
policy_id = _uuid()
data = {'firewall_policy': {'description': 'descr_firewall_policy1',
'name': 'new_fw_policy1',
'shared': False,
'firewall_rules': [_uuid(), _uuid()],
'audited': False,
'tenant_id': _uuid()}}
return_value = copy.copy(data['firewall_policy'])
return_value.update({'id': policy_id})
instance = self.plugin.return_value
instance.create_firewall_policy.return_value = return_value
res = self.api.post(_get_path('fw/firewall_policies',
fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_firewall_policy.assert_called_with(
mock.ANY,
firewall_policy=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('firewall_policy', res)
self.assertEqual(res['firewall_policy'], return_value)
def test_firewall_policy_list(self):
policy_id = _uuid()
return_value = [{'tenant_id': _uuid(),
'id': policy_id}]
instance = self.plugin.return_value
instance.get_firewall_policies.return_value = return_value
res = self.api.get(_get_path('fw/firewall_policies',
fmt=self.fmt))
instance.get_firewall_policies.assert_called_with(mock.ANY,
fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
def test_firewall_policy_get(self):
policy_id = _uuid()
return_value = {'tenant_id': _uuid(),
'id': policy_id}
instance = self.plugin.return_value
instance.get_firewall_policy.return_value = return_value
res = self.api.get(_get_path('fw/firewall_policies',
id=policy_id, fmt=self.fmt))
instance.get_firewall_policy.assert_called_with(mock.ANY,
policy_id,
fields=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('firewall_policy', res)
self.assertEqual(res['firewall_policy'], return_value)
def test_firewall_policy_update(self):
policy_id = _uuid()
update_data = {'firewall_policy': {'audited': True}}
return_value = {'tenant_id': _uuid(),
'id': policy_id}
instance = self.plugin.return_value
instance.update_firewall_policy.return_value = return_value
res = self.api.put(_get_path('fw/firewall_policies',
id=policy_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_firewall_policy.assert_called_with(
mock.ANY,
policy_id,
firewall_policy=update_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('firewall_policy', res)
self.assertEqual(res['firewall_policy'], return_value)
def test_firewall_policy_update_malformed_rules(self):
# emulating client request when no rule uuids are provided for
# --firewall_rules parameter
update_data = {'firewall_policy': {'firewall_rules': True}}
# have to check for generic AppError
self.assertRaises(
webtest.AppError,
self.api.put,
_get_path('fw/firewall_policies', id=_uuid(), fmt=self.fmt),
self.serialize(update_data))
def test_firewall_policy_delete(self):
self._test_entity_delete('firewall_policy')
def test_firewall_policy_insert_rule(self):
firewall_policy_id = _uuid()
firewall_rule_id = _uuid()
ref_firewall_rule_id = _uuid()
insert_data = {'firewall_rule_id': firewall_rule_id,
'insert_before': ref_firewall_rule_id,
'insert_after': None}
return_value = {'firewall_policy':
{'tenant_id': _uuid(),
'id': firewall_policy_id,
'firewall_rules': [ref_firewall_rule_id,
firewall_rule_id]}}
instance = self.plugin.return_value
instance.insert_rule.return_value = return_value
path = _get_path('fw/firewall_policies', id=firewall_policy_id,
action="insert_rule",
fmt=self.fmt)
res = self.api.put(path, self.serialize(insert_data))
instance.insert_rule.assert_called_with(mock.ANY, firewall_policy_id,
insert_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertEqual(res, return_value)
def test_firewall_policy_remove_rule(self):
    """PUT with the remove_rule action removes the rule and returns the
    updated (now empty) policy."""
    policy_id = _uuid()
    rule_id = _uuid()
    body = {'firewall_rule_id': rule_id}
    expected = {'firewall_policy':
                {'tenant_id': _uuid(),
                 'id': policy_id,
                 'firewall_rules': []}}
    plugin_instance = self.plugin.return_value
    plugin_instance.remove_rule.return_value = expected
    path = _get_path('fw/firewall_policies', id=policy_id,
                     action="remove_rule", fmt=self.fmt)
    raw = self.api.put(path, self.serialize(body))
    plugin_instance.remove_rule.assert_called_with(mock.ANY, policy_id, body)
    self.assertEqual(raw.status_int, exc.HTTPOk.code)
    self.assertEqual(self.deserialize(raw), expected)
class TestFirewallAttributeValidators(base.BaseTestCase):
    """Unit tests for the firewall extension's attribute validators."""
    def test_validate_port_range(self):
        """Single ports and 'low:high' ranges; only 1-65535 is accepted."""
        # None means "no port constraint" and is accepted
        msg = firewall._validate_port_range(None)
        self.assertIsNone(msg)
        # single port, as string or int
        msg = firewall._validate_port_range('10')
        self.assertIsNone(msg)
        msg = firewall._validate_port_range(10)
        self.assertIsNone(msg)
        # out-of-range single ports
        msg = firewall._validate_port_range(-1)
        self.assertEqual(msg, "Invalid port '-1'")
        msg = firewall._validate_port_range('66000')
        self.assertEqual(msg, "Invalid port '66000'")
        # valid ranges
        msg = firewall._validate_port_range('10:20')
        self.assertIsNone(msg)
        msg = firewall._validate_port_range('1:65535')
        self.assertIsNone(msg)
        # range endpoints outside 1-65535
        msg = firewall._validate_port_range('0:65535')
        self.assertEqual(msg, "Invalid port '0'")
        msg = firewall._validate_port_range('1:65536')
        self.assertEqual(msg, "Invalid port '65536'")
        # non-numeric endpoints
        msg = firewall._validate_port_range('abc:efg')
        self.assertEqual(msg, "Port 'abc' is not a valid number")
        msg = firewall._validate_port_range('1:efg')
        self.assertEqual(msg, "Port 'efg' is not a valid number")
        # negative / oversized endpoints in either position
        msg = firewall._validate_port_range('-1:10')
        self.assertEqual(msg, "Invalid port '-1'")
        msg = firewall._validate_port_range('66000:10')
        self.assertEqual(msg, "Invalid port '66000'")
        msg = firewall._validate_port_range('10:66000')
        self.assertEqual(msg, "Invalid port '66000'")
        msg = firewall._validate_port_range('1:-10')
        self.assertEqual(msg, "Invalid port '-10'")
    def test_validate_ip_or_subnet_or_none(self):
        """Accepts None, a plain IP, or a CIDR; anything else errors."""
        msg = firewall._validate_ip_or_subnet_or_none(None)
        self.assertIsNone(msg)
        msg = firewall._validate_ip_or_subnet_or_none('1.1.1.1')
        self.assertIsNone(msg)
        msg = firewall._validate_ip_or_subnet_or_none('1.1.1.0/24')
        self.assertIsNone(msg)
        ip_addr = '1111.1.1.1'
        msg = firewall._validate_ip_or_subnet_or_none(ip_addr)
        self.assertEqual(msg, ("'%s' is not a valid IP address and "
                               "'%s' is not a valid IP subnet") % (ip_addr,
                                                                   ip_addr))
        # embedded whitespace of any kind invalidates the address
        ip_addr = '1.1.1.1 has whitespace'
        msg = firewall._validate_ip_or_subnet_or_none(ip_addr)
        self.assertEqual(msg, ("'%s' is not a valid IP address and "
                               "'%s' is not a valid IP subnet") % (ip_addr,
                                                                   ip_addr))
        ip_addr = '111.1.1.1\twhitespace'
        msg = firewall._validate_ip_or_subnet_or_none(ip_addr)
        self.assertEqual(msg, ("'%s' is not a valid IP address and "
                               "'%s' is not a valid IP subnet") % (ip_addr,
                                                                   ip_addr))
        ip_addr = '111.1.1.1\nwhitespace'
        msg = firewall._validate_ip_or_subnet_or_none(ip_addr)
        self.assertEqual(msg, ("'%s' is not a valid IP address and "
                               "'%s' is not a valid IP subnet") % (ip_addr,
                                                                   ip_addr))
        # Valid - IPv4
        cidr = "10.0.2.0/24"
        msg = firewall._validate_ip_or_subnet_or_none(cidr, None)
        self.assertIsNone(msg)
        # Valid - IPv6 without final octets
        cidr = "fe80::/24"
        msg = firewall._validate_ip_or_subnet_or_none(cidr, None)
        self.assertIsNone(msg)
        # Valid - IPv6 with final octets
        cidr = "fe80::0/24"
        msg = firewall._validate_ip_or_subnet_or_none(cidr, None)
        self.assertIsNone(msg)
        cidr = "fe80::"
        msg = firewall._validate_ip_or_subnet_or_none(cidr, None)
        self.assertIsNone(msg)
        # Valid - IPv6 with final octets but no mask: accepted, since
        # "fe80::0" is itself a well-formed IP address (NOTE(review):
        # the original comment labelled this case "Invalid", which
        # contradicts the assertion below).
        cidr = "fe80::0"
        msg = firewall._validate_ip_or_subnet_or_none(cidr, None)
        self.assertIsNone(msg)
        # Invalid - Address format error
        cidr = 'invalid'
        msg = firewall._validate_ip_or_subnet_or_none(cidr, None)
        self.assertEqual(msg, ("'%s' is not a valid IP address and "
                               "'%s' is not a valid IP subnet") % (cidr,
                                                                   cidr))
| |
"""
==========
tutormagic
==========
Magics to display pythontutor.com in the notebook.
Usage
=====
To enable the magics below, execute ``%load_ext tutormagic``.
``%%tutormagic``
{tutormagic_DOC}
"""
__version__="0.3.1"
#-----------------------------------------------------------------------------
# Copyright (C) 2015-2017 Kiko Correoso and the pythontutor.com developers
#
# Distributed under the terms of the MIT License. The full license is in
# the file LICENSE, distributed as part of this software.
#
# Contributors:
# kikocorreoso, jquacinella, hkarl, naereen, tomsimonart
#-----------------------------------------------------------------------------
import webbrowser
import warnings
warnings.simplefilter("always")
import sys
if sys.version_info.major == 2 and sys.version_info.minor == 7:
from urllib import quote
elif sys.version_info.major == 3 and sys.version_info.minor >= 3:
from urllib.parse import quote
else:
warnings.warn("This extension has not been tested on this Python version",
UserWarning)
from IPython.core.magic import Magics, magics_class, cell_magic
from IPython.testing.skipdoctest import skip_doctest
from IPython.core.magic_arguments import (argument, magic_arguments,
parse_argstring)
from IPython.utils.text import dedent
from IPython.display import display, IFrame, HTML
@magics_class
class TutorMagics(Magics):
    """
    A magic function to show pythontutor.com frame from a code cell.
    """
    # Accepted --lang values mapped to pythontutor's ``py=`` URL parameter.
    # Membership in this dict is also what validates the user's choice.
    _LANG_PARAMS = {
        'python2': '2',
        'python3': '3',
        'py3anaconda': 'py3anaconda',
        'java': 'java',
        'javascript': 'js',
        'typescript': 'ts',
        'ruby': 'ruby',
        'c': 'c',
        'c++': 'cpp',
    }
    def __init__(self, shell):
        super(TutorMagics, self).__init__(shell)
    @skip_doctest
    @magic_arguments()
    @argument(
        '-l', '--lang', action='store', nargs = 1,
        help="Languages to be displayed within the iframe or in a new tab. "
             "Possible values are: "
             "python2, python3, py3anaconda, java, javascript, typescript, ruby, c, c++"
    )
    @argument(
        '-h', '--height', action='store', nargs=1,
        help="Change the height of the output area display in pixels"
    )
    @argument(
        '-t', '--tab', action='store_true',
        help="Open pythontutor in a new tab",
    )
    @argument(
        '-s', '--secure', action='store_true',
        help="Open pythontutor using https in a new tab",
    )
    @argument(
        '-k', '--link', action='store_true',
        help="Just display a link to pythontutor",
    )
    @argument(
        '-r', '--run', action='store_true',
        help="Run the cell code also in the notebook",
    )
    @argument(
        '--cumulative', action='store_true', default=False,
        help="PythonTutor config: Set the cumulative option to True",
    )
    @argument(
        '--heapPrimitives', action='store_true', default=False,
        help="PythonTutor config: Render objects on the heap",
    )
    @argument(
        '--textReferences', action='store_true', default=False,
        help="PythonTutor config: Use text labels for references",
    )
    @argument(
        '--curInstr', action='store', default=0,
        help="PythonTutor config: Start at the defined step",
    )
    @argument(
        '--verticalStack', action='store_true', default=False,
        help="Set visualization to stack atop one another",
    )
    #@needs_local_scope
    @argument(
        'code',
        nargs='*',
    )
    @cell_magic
    def tutor(self, line, cell=None, local_ns=None):
        '''
        Create an iframe embedding the pythontutor.com page
        with the code included in the code cell::
        In [1]: %%tutor -l 'python3'
        ....: a = 1
        ....: b = 1
        ....: a + b
        [You will see an iframe with the pythontutor.com page including the
        code above]
        '''
        args = parse_argstring(self.tutor, line)
        # Validate and normalize the requested language (default: python3).
        if args.lang:
            lang = args.lang[0].lower()
            if lang not in self._LANG_PARAMS:
                raise ValueError(
                    "{} not supported. Only the following options are allowed: "
                    "'python2', 'python3', 'py3anaconda', 'java', 'javascript', "
                    "'typescript', 'ruby', 'c', 'c++'".format(args.lang[0]))
        else:
            lang = "python3"
        # Sometimes user will want SSL pythontutor site if
        # jupyter/hub is using SSL as well
        protocol = 'https://' if args.secure else 'http://'
        url = protocol + "pythontutor.com/iframe-embed.html#code="
        url += quote(cell)
        url += "&origin=opt-frontend.js"
        # Add custom pythontutor options, defaults to all false.
        # BUGFIX: the previous version appended stray '&' separators after
        # curInstr and verticalStack, producing '&&' in the generated URL.
        url += "&cumulative={}".format(str(args.cumulative).lower())
        url += "&heapPrimitives={}".format(str(args.heapPrimitives).lower())
        url += "&textReferences={}".format(str(args.textReferences).lower())
        url += "&curInstr={}".format(str(args.curInstr).lower())
        url += "&verticalStack={}".format(str(args.verticalStack).lower())
        # Setup the language URL param via the lookup table
        url += "&py={}".format(self._LANG_PARAMS[lang])
        # Add the rest of the misc options to pythontutor
        url += "&rawInputLstJSON=%5B%5D&codeDivWidth=50%25&codeDivHeight=100%25"
        # Display in new tab, or in iframe, or link to it via anchor link?
        if args.link:
            # Create html link to pythontutor
            css = ("box-sizing: border-box; "
                   "padding: 0 5px; border: "
                   "1px solid #CFCFCF;")
            display(
                HTML(
                    data=(
                        '<div class="text-center"><strong>'
                        '<a style="{0}" target="_" href={1}>Python Tutor</a>'
                        '</strong></div>'.format(css, url)
                    )
                )
            )
        elif args.tab:
            # Open up a new tab in the browser to pythontutor URL
            webbrowser.open_new_tab(url)
        else:
            # Display the results in the output area, honoring -h/--height
            if args.height:
                display(IFrame(
                    url, height = int(args.height[0]), width = "100%"
                ))
            else:
                display(IFrame(url, height = 350, width = "100%"))
        if args.run:
            # Run cell like normal
            self.shell.run_cell(cell)
# Inject the ``tutor`` magic's docstring into the module docstring's
# {tutormagic_DOC} placeholder so ``%load_ext`` help stays in sync.
__doc__ = __doc__.format(
    tutormagic_DOC = dedent(TutorMagics.tutor.__doc__))
def load_ipython_extension(ip):
    """Load the extension in IPython.

    Parameters
    ----------
    ip : IPython.core.interactiveshell.InteractiveShell
        The active shell; the TutorMagics class is registered on it.
    """
    ip.register_magics(TutorMagics)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
""" Key value store interface of MXNet for parameter synchronization."""
from __future__ import absolute_import
from array import array
import ctypes
import pickle
from .ndarray import NDArray
from .ndarray import _ndarray_cls
from .base import _LIB, c_str_array, c_handle_array, c_array, c_array_buf, c_str
from .base import check_call, string_types, mx_uint, py_str
from .base import NDArrayHandle, KVStoreHandle
from . import optimizer as opt
def _ctype_key_value(keys, vals):
    """
    Convert key/value arguments into ctypes arrays.

    Returns ``(c_keys, c_vals, use_str_keys)`` where the last element
    tells whether string keys (as opposed to int keys) are in use.
    For internal use only.
    """
    if isinstance(keys, (tuple, list)):
        # Flatten nested key/value lists recursively, checking that all
        # keys share a single type (all-str or all-int).
        assert(len(keys) == len(vals))
        flat_keys = []
        flat_vals = []
        str_keys = None
        for key, val in zip(keys, vals):
            part_keys, part_vals, part_str = _ctype_key_value(key, val)
            flat_keys += part_keys
            flat_vals += part_vals
            str_keys = part_str if str_keys is None else str_keys
            assert(str_keys == part_str), "inconsistent types of keys detected."
        keys_arr = c_array(ctypes.c_char_p, flat_keys) if str_keys \
                   else c_array(ctypes.c_int, flat_keys)
        vals_arr = c_array(ctypes.c_void_p, flat_vals)
        return (keys_arr, vals_arr, str_keys)
    # Scalar key: either a single NDArray value or a list of NDArrays
    # that all share this key.
    assert(isinstance(keys, (int,) + string_types)), \
        "unexpected type for keys: " + str(type(keys))
    str_keys = isinstance(keys, string_types)
    if isinstance(vals, NDArray):
        single = c_str_array([keys]) if str_keys \
                 else c_array_buf(ctypes.c_int, array('i', [keys]))
        return (single, c_handle_array([vals]), str_keys)
    for value in vals:
        assert(isinstance(value, NDArray))
    repeated = [keys] * len(vals)
    many = c_str_array(repeated) if str_keys \
           else c_array_buf(ctypes.c_int, array('i', repeated))
    return (many, c_handle_array(vals), str_keys)
def _updater_wrapper(updater):
    """Wrap a user-defined updater so it is callable from a ctypes callback."""
    def updater_handle(key, lhs_handle, rhs_handle, _):
        """ctypes callback: rebuild NDArrays from raw handles and delegate."""
        updater(key,
                _ndarray_cls(NDArrayHandle(lhs_handle)),
                _ndarray_cls(NDArrayHandle(rhs_handle)))
    return updater_handle
class KVStore(object):
    """A key-value store for synchronization of values, over multiple devices."""
    def __init__(self, handle):
        """Initializes a new KVStore.
        Parameters
        ----------
        handle : KVStoreHandle
            `KVStore` handle of C API.
        """
        assert isinstance(handle, KVStoreHandle)
        self.handle = handle
        self._updater = None
        # The ctypes callback objects below must be kept alive for as long
        # as the C side may invoke them, hence they live on the instance.
        self._updater_func = None
        self._str_updater_func = None
    def __del__(self):
        # Release the underlying C handle when the Python object dies.
        check_call(_LIB.MXKVStoreFree(self.handle))
    def init(self, key, value):
        """ Initializes a single or a sequence of key-value pairs into the store.
        For each key, one must `init` it before calling `push` or `pull`.
        When multiple workers invoke `init` for the same key, only
        the value supplied by worker with rank `0` is used. This function returns
        after data has been initialized successfully.
        Parameters
        ----------
        key : str, int, or sequence of str or int
            The keys.
        value : NDArray, RowSparseNDArray or sequence of NDArray or RowSparseNDArray
            Values corresponding to the keys.
        Examples
        --------
        >>> # init a single key-value pair
        >>> shape = (2,3)
        >>> kv = mx.kv.create('local')
        >>> kv.init('3', mx.nd.ones(shape)*2)
        >>> a = mx.nd.zeros(shape)
        >>> kv.pull('3', out=a)
        >>> print a.asnumpy()
        [[ 2.  2.  2.]
        [ 2.  2.  2.]]
        >>> # init a list of key-value pairs
        >>> keys = ['5', '7', '9']
        >>> kv.init(keys, [mx.nd.ones(shape)]*len(keys))
        >>> # init a row_sparse value
        >>> kv.init('4', mx.nd.ones(shape).tostype('row_sparse'))
        >>> b = mx.nd.sparse.zeros('row_sparse', shape)
        >>> kv.row_sparse_pull('4', row_ids=mx.nd.array([0, 1]), out=b)
        >>> print b
        <RowSparseNDArray 2x3 @cpu(0)>
        """
        ckeys, cvals, use_str_keys = _ctype_key_value(key, value)
        if use_str_keys:
            check_call(_LIB.MXKVStoreInitEx(self.handle, mx_uint(len(ckeys)), ckeys, cvals))
        else:
            check_call(_LIB.MXKVStoreInit(self.handle, mx_uint(len(ckeys)), ckeys, cvals))
    def push(self, key, value, priority=0):
        """ Pushes a single or a sequence of key-value pairs into the store.
        This function returns immediately after adding an operator to the engine.
        The actual operation is executed asynchronously. If there are consecutive
        pushes to the same key, there is no guarantee on the serialization of pushes.
        The execution of a push does not guarantee that all previous pushes are
        finished.
        There is no synchronization between workers.
        One can use ``_barrier()`` to sync all workers.
        Parameters
        ----------
        key : str, int, or sequence of str or int
            Keys.
        value : NDArray, RowSparseNDArray, list of NDArray or RowSparseNDArray,
                or list of list of NDArray or RowSparseNDArray
            Values corresponding to the keys.
        priority : int, optional
            The priority of the push operation.
            Higher priority push operations are likely to be executed before
            other push actions.
        Examples
        --------
        >>> # push a single key-value pair
        >>> kv.push('3', mx.nd.ones(shape)*8)
        >>> kv.pull('3', out=a) # pull out the value
        >>> print a.asnumpy()
        [[ 8.  8.  8.]
        [ 8.  8.  8.]]
        >>> # aggregate the value and the push
        >>> gpus = [mx.gpu(i) for i in range(4)]
        >>> b = [mx.nd.ones(shape, gpu) for gpu in gpus]
        >>> kv.push('3', b)
        >>> kv.pull('3', out=a)
        >>> print a.asnumpy()
        [[ 4.  4.  4.]
        [ 4.  4.  4.]]
        >>> # push a list of keys.
        >>> # single device
        >>> keys = ['4', '5', '6']
        >>> kv.push(keys, [mx.nd.ones(shape)]*len(keys))
        >>> b = [mx.nd.zeros(shape)]*len(keys)
        >>> kv.pull(keys, out=b)
        >>> print b[1].asnumpy()
        [[ 1.  1.  1.]
        [ 1.  1.  1.]]
        >>> # multiple devices:
        >>> keys = ['7', '8', '9']
        >>> b = [[mx.nd.ones(shape, gpu) for gpu in gpus]] * len(keys)
        >>> kv.push(keys, b)
        >>> kv.pull(keys, out=b)
        >>> print b[1][1].asnumpy()
        [[ 4.  4.  4.]
        [ 4.  4.  4.]]
        >>> # push a row_sparse value
        >>> b = mx.nd.sparse.zeros('row_sparse', shape)
        >>> kv.init('10', mx.nd.sparse.zeros('row_sparse', shape))
        >>> kv.push('10', mx.nd.ones(shape).tostype('row_sparse'))
        >>> # pull out the value
        >>> kv.row_sparse_pull('10', row_ids=mx.nd.array([0, 1]), out=b)
        >>> print b
        <RowSparseNDArray 2x3 @cpu(0)>
        """
        ckeys, cvals, use_str_keys = _ctype_key_value(key, value)
        if use_str_keys:
            check_call(_LIB.MXKVStorePushEx(
                self.handle, mx_uint(len(ckeys)), ckeys, cvals, ctypes.c_int(priority)))
        else:
            check_call(_LIB.MXKVStorePush(
                self.handle, mx_uint(len(ckeys)), ckeys, cvals, ctypes.c_int(priority)))
    def pull(self, key, out=None, priority=0):
        """ Pulls a single value or a sequence of values from the store.
        This function returns immediately after adding an operator to the engine.
        Subsequent attempts to read from the `out` variable will be blocked until the
        pull operation completes.
        `pull` is executed asynchronously after all previous `pull` calls and only
        the last `push` call for the same input key(s) are finished.
        The returned values are guaranteed to be the latest values in the store.
        For `RowSparseNDArray` values, this call is ignored,
        please use ``row_sparse_pull`` instead.
        Parameters
        ----------
        key : str, int, or sequence of str or int
            Keys.
        out: NDArray or list of NDArray or list of list of NDArray
            Values corresponding to the keys.
        priority : int, optional
            The priority of the pull operation.
            Higher priority pull operations are likely to be executed before
            other pull actions.
        Examples
        --------
        >>> # pull a single key-value pair
        >>> a = mx.nd.zeros(shape)
        >>> kv.pull('3', out=a)
        >>> print a.asnumpy()
        [[ 2.  2.  2.]
        [ 2.  2.  2.]]
        >>> # pull into multiple devices
        >>> b = [mx.nd.ones(shape, gpu) for gpu in gpus]
        >>> kv.pull('3', out=b)
        >>> print b[1].asnumpy()
        [[ 2.  2.  2.]
        [ 2.  2.  2.]]
        >>> # pull a list of key-value pairs.
        >>> # On single device
        >>> keys = ['5', '7', '9']
        >>> b = [mx.nd.zeros(shape)]*len(keys)
        >>> kv.pull(keys, out=b)
        >>> print b[1].asnumpy()
        [[ 2.  2.  2.]
        [ 2.  2.  2.]]
        >>> # On multiple devices
        >>> keys = ['6', '8', '10']
        >>> b = [[mx.nd.ones(shape, gpu) for gpu in gpus]] * len(keys)
        >>> kv.pull(keys, out=b)
        >>> print b[1][1].asnumpy()
        [[ 2.  2.  2.]
        [ 2.  2.  2.]]
        """
        assert(out is not None)
        ckeys, cvals, use_str_keys = _ctype_key_value(key, out)
        if use_str_keys:
            check_call(_LIB.MXKVStorePullEx(
                self.handle, mx_uint(len(ckeys)), ckeys, cvals, ctypes.c_int(priority)))
        else:
            check_call(_LIB.MXKVStorePull(
                self.handle, mx_uint(len(ckeys)), ckeys, cvals, ctypes.c_int(priority)))
    def row_sparse_pull(self, key, out=None, priority=0, row_ids=None):
        """ Pulls a single RowSparseNDArray value or a sequence of RowSparseNDArray values \
        from the store with specified row_ids.
        `row_sparse_pull` is executed asynchronously after all previous
        `pull`/`row_sparse_pull` calls and the last `push` call for the
        same input key(s) are finished.
        The returned values are guaranteed to be the latest values in the store.
        Parameters
        ----------
        key : str, int, or sequence of str or int
            Keys.
        out: RowSparseNDArray or list of RowSparseNDArray or list of list of RowSparseNDArray
            Values corresponding to the keys. The stype is expected to be row_sparse
        priority : int, optional
            The priority of the pull operation.
            Higher priority pull operations are likely to be executed before
            other pull actions.
        row_ids : NDArray or list of NDArray
            The row_ids for which to pull for each value. Each row_id is an 1D NDArray \
            whose values don't have to be unique nor sorted.
        Examples
        --------
        >>> shape = (3, 3)
        >>> kv.init('3', mx.nd.ones(shape).tostype('row_sparse'))
        >>> a = mx.nd.sparse.zeros('row_sparse', shape)
        >>> row_ids = mx.nd.array([0, 2], dtype='int64')
        >>> kv.row_sparse_pull('3', out=a, row_ids=row_ids)
        >>> print a.asnumpy()
        [[ 1.  1.  1.]
        [ 0.  0.  0.]
        [ 1.  1.  1.]]
        >>> duplicate_row_ids = mx.nd.array([2, 2], dtype='int64')
        >>> kv.row_sparse_pull('3', out=a, row_ids=duplicate_row_ids)
        >>> print a.asnumpy()
        [[ 0.  0.  0.]
        [ 0.  0.  0.]
        [ 1.  1.  1.]]
        >>> unsorted_row_ids = mx.nd.array([1, 0], dtype='int64')
        >>> kv.row_sparse_pull('3', out=a, row_ids=unsorted_row_ids)
        >>> print a.asnumpy()
        [[ 1.  1.  1.]
        [ 1.  1.  1.]
        [ 0.  0.  0.]]
        """
        assert(out is not None)
        assert(row_ids is not None)
        ckeys, cvals, use_str_keys = _ctype_key_value(key, out)
        _, crow_ids, _ = _ctype_key_value(key, row_ids)
        assert(len(crow_ids) == len(cvals)), \
               "the number of row_ids doesn't match the number of values"
        if use_str_keys:
            check_call(_LIB.MXKVStorePullRowSparseEx(
                self.handle, mx_uint(len(ckeys)), ckeys, cvals, crow_ids, ctypes.c_int(priority)))
        else:
            check_call(_LIB.MXKVStorePullRowSparse(
                self.handle, mx_uint(len(ckeys)), ckeys, cvals, crow_ids, ctypes.c_int(priority)))
    def set_optimizer(self, optimizer):
        """ Registers an optimizer with the kvstore.
        When using a single machine, this function updates the local optimizer.
        If using multiple machines and this operation is invoked from a worker node,
        it will serialized the optimizer with pickle and send it to all servers.
        The function returns after all servers have been updated.
        Parameters
        ----------
        optimizer : Optimizer
            The new optimizer for the store
        Examples
        --------
        >>> kv = mx.kv.create()
        >>> shape = (2, 2)
        >>> weight = mx.nd.zeros(shape)
        >>> kv.init(3, weight)
        >>> # set the optimizer for kvstore as the default SGD optimizer
        >>> kv.set_optimizer(mx.optimizer.SGD())
        >>> grad = mx.nd.ones(shape)
        >>> kv.push(3, grad)
        >>> kv.pull(3, out = weight)
        >>> # weight is updated via gradient descent
        >>> weight.asnumpy()
        array([[-0.01, -0.01],
        [-0.01, -0.01]], dtype=float32)
        """
        is_worker = ctypes.c_int()
        check_call(_LIB.MXKVStoreIsWorkerNode(ctypes.byref(is_worker)))
        # pylint: disable=invalid-name
        if 'dist' in self.type and is_worker.value:
            # send the optimizer to server
            # use ASCII protocol 0, might be slower, but not a big deal
            # (a failed pickle simply propagates; the previous
            # ``try/except: raise`` around it was a no-op and was removed)
            optim_str = pickle.dumps(optimizer, 0)
            self._send_command_to_servers(0, optim_str)
        else:
            self._set_updater(opt.get_updater(optimizer))
    @property
    def type(self):
        """ Returns the type of this kvstore.
        Returns
        -------
        type : str
            the string type
        """
        kv_type = ctypes.c_char_p()
        check_call(_LIB.MXKVStoreGetType(self.handle, ctypes.byref(kv_type)))
        return py_str(kv_type.value)
    @property
    def rank(self):
        """ Returns the rank of this worker node.
        Returns
        -------
        rank : int
            The rank of this node, which is in range [0, num_workers())
        """
        rank = ctypes.c_int()
        check_call(_LIB.MXKVStoreGetRank(self.handle, ctypes.byref(rank)))
        return rank.value
    @property
    def num_workers(self):
        """Returns the number of worker nodes.
        Returns
        -------
        size :int
            The number of worker nodes.
        """
        size = ctypes.c_int()
        check_call(_LIB.MXKVStoreGetGroupSize(self.handle, ctypes.byref(size)))
        return size.value
    def save_optimizer_states(self, fname, dump_optimizer=False):
        """Saves the optimizer (updater) state to a file. This is often used when checkpointing
        the model during training.
        Parameters
        ----------
        fname : str
            Path to the output states file.
        dump_optimizer : bool, default False
            Whether to also save the optimizer itself. This would also save optimizer
            information such as learning rate and weight decay schedules.
        """
        assert self._updater is not None, "Cannot save states for distributed training"
        with open(fname, 'wb') as fout:
            fout.write(self._updater.get_states(dump_optimizer))
    def load_optimizer_states(self, fname):
        """Loads the optimizer (updater) state from the file.
        Parameters
        ----------
        fname : str
            Path to input states file.
        """
        assert self._updater is not None, "Cannot load states for distributed training"
        # Use a context manager so the file handle is closed promptly rather
        # than leaking until garbage collection (the previous code called
        # ``open(fname, 'rb').read()`` without closing the handle).
        with open(fname, 'rb') as fin:
            self._updater.set_states(fin.read())
    def _set_updater(self, updater):
        """Sets a push updater into the store.
        This function only changes the local store. When running on multiple machines one must
        use `set_optimizer`.
        Parameters
        ----------
        updater : function
            The updater function.
        Examples
        --------
        >>> def update(key, input, stored):
        ...     print "update on key: %d" % key
        ...     stored += input * 2
        >>> kv._set_updater(update)
        >>> kv.pull('3', out=a)
        >>> print a.asnumpy()
        [[ 4.  4.  4.]
        [ 4.  4.  4.]]
        >>> kv.push('3', mx.nd.ones(shape))
        update on key: 3
        >>> kv.pull('3', out=a)
        >>> print a.asnumpy()
        [[ 6.  6.  6.]
        [ 6.  6.  6.]]
        """
        self._updater = updater
        # set updater with int keys
        _updater_proto = ctypes.CFUNCTYPE(
            None, ctypes.c_int, NDArrayHandle, NDArrayHandle, ctypes.c_void_p)
        self._updater_func = _updater_proto(_updater_wrapper(updater))
        # set updater with str keys
        _str_updater_proto = ctypes.CFUNCTYPE(
            None, ctypes.c_char_p, NDArrayHandle, NDArrayHandle, ctypes.c_void_p)
        self._str_updater_func = _str_updater_proto(_updater_wrapper(updater))
        check_call(_LIB.MXKVStoreSetUpdaterEx(self.handle, self._updater_func,
                                              self._str_updater_func, None))
    def _barrier(self):
        """Invokes global barrier among all worker nodes.
        For example, assume there are `n` machines. We would like machine `0` to first
        `init` the values and then have all the workers `pull` the initialized value.
        Before pulling, we can place invoke `_barrier()` to guarantee that the
        initialization is finished.
        """
        check_call(_LIB.MXKVStoreBarrier(self.handle))
    def _send_command_to_servers(self, head, body):
        """Sends a command to all server nodes.
        Sending command to a server node will cause that server node to invoke
        ``KVStoreServer.controller`` to execute the command.
        This function returns after the command has been executed on all server
        nodes.
        Parameters
        ----------
        head : int
            the head of the command.
        body : str
            the body of the command.
        """
        # NOTE: 'Commmand' is the actual (misspelled) symbol exported by the
        # MXNet C API; do not "fix" the name here.
        check_call(_LIB.MXKVStoreSendCommmandToServers(
            self.handle, mx_uint(head), c_str(body)))
def create(name='local'):
    """Create and return a new :class:`KVStore` of the given type.

    Single-machine types:
      - ``local``: gradients are copied to CPU memory and weights are
        updated there.
      - ``device``: gradients are aggregated and weights updated on GPU,
        using GPU peer-to-peer communication when possible.

    Distributed types:
      - ``dist_sync``: like ``local``, but batch-size is per machine, so
        ``n`` machines with batch size ``b`` behave like ``local`` with
        batch size ``n * b``.
      - ``dist_device_sync``: ``dist_sync`` with on-device aggregation,
        analogous to ``device`` vs ``local``.
      - ``dist_async``: asynchronous updates applied as gradients arrive;
        updates to one weight never race, but their order is unspecified.

    Parameters
    ----------
    name : {'local', 'device', 'dist_sync', 'dist_device_sync', 'dist_async'}
        The type of KVStore.
    Returns
    -------
    kv : KVStore
        The created KVStore.
    """
    if not isinstance(name, string_types):
        raise TypeError('name must be a string')
    handle = KVStoreHandle()
    check_call(_LIB.MXKVStoreCreate(c_str(name), ctypes.byref(handle)))
    return KVStore(handle)
| |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import print_function
import sys
try:
from exceptions import EOFError
except ImportError:
pass
import collections as cx
# Stanza headers used in OBO flat files: [Typedef] ends the term section,
# [Term] starts each GO term record.
typedef_tag, term_tag = "[Typedef]", "[Term]"
# Graph rendering backends supported for DAG plotting.
GraphEngines = ("pygraphviz", "pydot")
def after_colon(line):
    """Return the text after the first ':' on *line*, whitespace-stripped."""
    # Split on the first colon only, since values (e.g. GO ids) may
    # themselves contain ':'.
    tail = line.split(":", 1)[1]
    return tail.strip()
def read_until(handle, start):
    """Advance *handle* to just before the next line starting with *start*.

    The matching line is left unread (the stream is rewound to its start);
    raises EOFError when no such line exists.
    """
    while True:
        mark = handle.tell()
        line = handle.readline()
        if not line:
            # end of stream reached without finding the tag
            raise EOFError("%s tag cannot be found" % start)
        if line.startswith(start):
            handle.seek(mark)  # put the tag line back for the caller
            return
class OBOReader:
    """
    Parse an OBO flat file; the most up-to-date ontology can usually be
    downloaded from http://purl.obolibrary.org/obo/go/go-basic.obo

    >>> reader = OBOReader()
    >>> for rec in reader:
    ...     print(rec)
    """
    def __init__(self, obo_file="go-basic.obo"):
        """Open *obo_file*, or exit with a download hint if it is missing."""
        try:
            self._handle = open(obo_file)
        except IOError:
            # narrowed from a bare ``except:`` — only a missing/unreadable
            # file should trigger the download hint
            print(("download obo file first\n "
                   "[http://purl.obolibrary.org/obo/"
                   "go/go-basic.obo]"), file=sys.stderr)
            sys.exit(1)
    def __iter__(self):
        # Skip the file header up to the first [Term] stanza.
        line = self._handle.readline()
        if not line.startswith(term_tag):
            read_until(self._handle, term_tag)
        while 1:
            # PEP 479 (Python 3.7+): a StopIteration escaping a generator
            # body is converted into RuntimeError, so the exhaustion signal
            # from __next__ must be caught and turned into a plain return.
            try:
                yield self.__next__()
            except StopIteration:
                return
    def __next__(self):
        """Return the next GOTerm record; raise StopIteration at EOF/[Typedef]."""
        lines = []
        line = self._handle.readline()
        if not line or line.startswith(typedef_tag):
            raise StopIteration
        # read until the next tag and save everything in between
        while 1:
            pos = self._handle.tell()  # save current position for roll-back
            line = self._handle.readline()
            if not line or (line.startswith(typedef_tag)
                            or line.startswith(term_tag)):
                self._handle.seek(pos)  # roll-back
                break
            lines.append(line)
        rec = GOTerm()
        for line in lines:
            if line.startswith("id:"):
                rec.id = after_colon(line)
            if line.startswith("alt_id:"):
                rec.alt_ids.append(after_colon(line))
            elif line.startswith("name:"):
                rec.name = after_colon(line)
            elif line.startswith("namespace:"):
                rec.namespace = after_colon(line)
            elif line.startswith("is_a:"):
                rec._parents.append(after_colon(line).split()[0])
            elif (line.startswith("is_obsolete:") and
                  after_colon(line) == "true"):
                rec.is_obsolete = True
        return rec
    # Python 2 iterator-protocol compatibility (the module still targets 2/3).
    next = __next__
class GOTerm:
    """
    A single GO term node; the OBO file carries many more attributes than
    are exposed on this class.
    """
    def __init__(self):
        self.id = ""                 # GO:NNNNNNN
        self.name = ""               # description
        self.namespace = ""          # BP, CC, MF
        self._parents = []           # is_a basestring of parents
        self.parents = []            # parent records
        self.children = []           # children records
        self.level = None            # shortest distance from root node
        self.depth = None            # longest distance from root node
        self.is_obsolete = False     # is_obsolete
        self.alt_ids = []            # alternative identifiers
    def __str__(self):
        obsolete = "obsolete" if self.is_obsolete else ""
        return "%s\tlevel-%02d\tdepth-%02d\t%s [%s] %s" % (
            self.id, self.level, self.depth, self.name, self.namespace,
            obsolete)
    def __repr__(self):
        return "GOTerm('%s')" % (self.id)
    def has_parent(self, term):
        """True if the GO id *term* is an ancestor of this term."""
        return any(parent.id == term or parent.has_parent(term)
                   for parent in self.parents)
    def has_child(self, term):
        """True if the GO id *term* is a descendant of this term."""
        return any(child.id == term or child.has_child(term)
                   for child in self.children)
    def get_all_parents(self):
        """Return the set of ids of every ancestor term."""
        ancestors = set()
        for parent in self.parents:
            ancestors.add(parent.id)
            ancestors.update(parent.get_all_parents())
        return ancestors
    def get_all_children(self):
        """Return the set of ids of every descendant term."""
        descendants = set()
        for child in self.children:
            descendants.add(child.id)
            descendants.update(child.get_all_children())
        return descendants
    def get_all_parent_edges(self):
        """Return every (child_id, parent_id) edge on paths to the roots."""
        edges = set()
        for parent in self.parents:
            edges.add((self.id, parent.id))
            edges.update(parent.get_all_parent_edges())
        return edges
    def get_all_child_edges(self):
        """Return every (child_id, parent_id) edge on paths to the leaves."""
        edges = set()
        for child in self.children:
            edges.add((child.id, self.id))
            edges.update(child.get_all_child_edges())
        return edges
    def write_hier_rec(self, gos_printed, out=sys.stdout,
                       len_dash=1, max_depth=None, num_child=None, short_prt=False,
                       include_only=None, go_marks=None,
                       depth=1, dp="-"):
        """Write hierarchy for a GO Term record."""
        term_id = self.id
        # Shortens hierarchy report by only printing the hierarchy
        # for the sub-set of user-specified GO terms which are connected.
        if include_only is not None and term_id not in include_only:
            return
        no_reprint = short_prt and term_id in gos_printed
        if go_marks is not None:
            out.write('{} '.format('>' if term_id in go_marks else ' '))
        if len_dash is not None:
            # Default character indicating hierarchy level is '-';
            # '=' marks a hierarchical path already printed in detail earlier.
            letter = '-' if not no_reprint or not self.children else '='
            dp = letter * depth
            out.write('{DASHES:{N}} '.format(DASHES=dp, N=len_dash))
        if num_child is not None:
            out.write('{N:>5} '.format(N=len(self.get_all_children())))
        out.write('{GO}\tL-{L:>02}\tD-{D:>02}\t{desc}\n'.format(
            GO=self.id, L=self.level, D=self.depth, desc=self.name))
        # Track GOs previously printed only if needed
        if short_prt:
            gos_printed.add(term_id)
        # Do not print the sub-hierarchy again if it was printed before
        if no_reprint:
            return
        depth += 1
        if max_depth is not None and depth > max_depth:
            return
        for child in self.children:
            child.write_hier_rec(gos_printed, out, len_dash, max_depth,
                                 num_child, short_prt, include_only, go_marks,
                                 depth, dp)
class GODag(dict):
    """A Gene Ontology DAG: maps GO id (including alt ids) -> GO Term record.

    Loading an OBO file populates the dict, resolves parent/child references
    and computes each term's level (shortest path to a root) and depth
    (longest path to a root).
    """

    def __init__(self, obo_file="go-basic.obo"):
        self.load_obo_file(obo_file)

    def load_obo_file(self, obo_file):
        """Read every record from the OBO file and index it by id and alt ids."""
        print("load obo file %s" % obo_file, file=sys.stderr)
        obo_reader = OBOReader(obo_file)
        for rec in obo_reader:
            self[rec.id] = rec
            # Alternate ids resolve to the very same record object.
            for alt in rec.alt_ids:
                self[alt] = rec
        self.populate_terms()
        print(len(self), "nodes imported", file=sys.stderr)

    def populate_terms(self):
        """Resolve parent references, build child lists and set level/depth."""

        def _init_level(rec):
            # level: length of the *shortest* path up to a root term.
            if rec.level is None:
                if not rec.parents:
                    rec.level = 0
                else:
                    rec.level = min(_init_level(parent)
                                    for parent in rec.parents) + 1
            return rec.level

        def _init_depth(rec):
            # depth: length of the *longest* path up to a root term.
            if rec.depth is None:
                if not rec.parents:
                    rec.depth = 0
                else:
                    rec.depth = max(_init_depth(parent)
                                    for parent in rec.parents) + 1
            return rec.depth

        # make the parents references to the GO terms
        for rec in self.values():
            rec.parents = [self[x] for x in rec._parents]
        # populate children and levels
        for rec in self.values():
            for parent in rec.parents:
                if rec not in parent.children:
                    parent.children.append(rec)
            if rec.level is None:
                _init_level(rec)
            if rec.depth is None:
                _init_depth(rec)

    def write_dag(self, out=sys.stdout):
        """Write info for all GO Terms in obo file, sorted numerically."""
        for _, rec in sorted(self.items()):
            print(rec, file=out)

    def write_hier_all(self, out=sys.stdout,
                       len_dash=1, max_depth=None, num_child=None, short_prt=False):
        """Write hierarchy for all GO Terms in obo file."""
        # Print: [biological_process, molecular_function, and cellular_component]
        for go_id in ['GO:0008150', 'GO:0003674', 'GO:0005575']:
            self.write_hier(go_id, out, len_dash, max_depth, num_child, short_prt, None)

    def write_hier(self, GO_id, out=sys.stdout,
                   len_dash=1, max_depth=None, num_child=None, short_prt=False,
                   include_only=None, go_marks=None):
        """Write the hierarchy rooted at a single GO Term."""
        gos_printed = set()
        self[GO_id].write_hier_rec(gos_printed, out, len_dash, max_depth, num_child,
                                   short_prt, include_only, go_marks)

    def write_summary_cnts(self, GO_ids, out=sys.stdout):
        """Write summary of level and depth counts for specific GO ids."""
        cnts = GODag.get_cnts_levels_depths_recs([self[GO] for GO in GO_ids])
        self._write_summary_cnts(cnts, out)

    def write_summary_cnts_all(self, out=sys.stdout):
        """Write summary of level and depth counts for all active GO Terms."""
        cnts = self.get_cnts_levels_depths_recs(set(self.values()))
        self._write_summary_cnts(cnts, out)

    def write_summary_cnts_rec(self, out=sys.stdout, recs=None):
        """Write summary of level and depth counts for the given GO Term records.

        Fix: ``recs`` used to be an undefined name here (a guaranteed
        NameError); it is now an explicit parameter which defaults to every
        record in the DAG. ``out`` stays first so positional callers still work.
        """
        if recs is None:
            recs = set(self.values())
        cnts = self.get_cnts_levels_depths_recs(recs)
        self._write_summary_cnts(cnts, out)

    def _write_summary_cnts(self, cnts, out=sys.stdout):
        """Write a table of depth/level counts per GO namespace."""
        # Count level(shortest path to root) and depth(longest path to root)
        # values for all unique GO Terms.
        observed = list(cnts['depth']) + list(cnts['level'])
        # Guard against empty input: emit only the header instead of raising.
        max_val = max(observed) if observed else -1
        nss = ['biological_process', 'molecular_function', 'cellular_component']
        out.write('Dep <-Depth Counts-> <-Level Counts->\n')
        out.write('Lev BP MF CC BP MF CC\n')
        out.write('--- ---- ---- ---- ---- ---- ----\n')
        for i in range(max_val + 1):
            vals = ['{:>5}'.format(cnts[desc][i][ns]) for desc in cnts for ns in nss]
            out.write('{:>02} {}\n'.format(i, ' '.join(vals)))

    @staticmethod
    def get_cnts_levels_depths_recs(recs):
        """Collect counts of levels and depths in a Group of GO Terms."""
        cnts = cx.defaultdict(lambda: cx.defaultdict(cx.Counter))
        for rec in recs:
            if not rec.is_obsolete:
                cnts['level'][rec.level][rec.namespace] += 1
                cnts['depth'][rec.depth][rec.namespace] += 1
        return cnts

    @staticmethod
    def id2int(GO_id):
        """Return the numeric part of a GO id, e.g. 'GO:0000042' -> 42."""
        return int(GO_id.replace("GO:", "", 1))

    def query_term(self, term, verbose=False):
        """Look up one term; print it (and, verbosely, its relatives) to stderr."""
        if term not in self:
            print("Term %s not found!" % term, file=sys.stderr)
            return
        rec = self[term]
        print(rec, file=sys.stderr)
        if verbose:
            print("all parents:", rec.get_all_parents(), file=sys.stderr)
            print("all children:", rec.get_all_children(), file=sys.stderr)
        return rec

    def paths_to_top(self, term, verbose=False):
        """ Returns all possible paths to the root node
        Each path includes the term given. The order of the path is
        top -> bottom, i.e. it starts with the root and ends with the
        given term (inclusively).
        Parameters:
        -----------
        - term:
        the id of the GO term, where the paths begin (i.e. the
        accession 'GO:0003682')
        Returns:
        --------
        - a list of lists of GO Terms
        """
        # error handling consistent with original authors
        if term not in self:
            print("Term %s not found!" % term, file=sys.stderr)
            return

        def _paths_to_top_recursive(rec):
            # A level-0 term is a root; the path starts (and ends) there.
            if rec.level == 0:
                return [[rec]]
            paths = []
            for parent in rec.parents:
                for top_path in _paths_to_top_recursive(parent):
                    top_path.append(rec)
                    paths.append(top_path)
            return paths

        return _paths_to_top_recursive(self[term])

    def _label_wrap(self, label):
        """Return a graphviz label: the GO id plus its name, wrapped at commas."""
        wrapped_label = r"%s\n%s" % (label,
                                     self[label].name.replace(",", r"\n"))
        return wrapped_label

    def make_graph_pydot(self, recs, nodecolor,
                         edgecolor, dpi,
                         draw_parents=True, draw_children=True):
        """draw AMIGO style network, lineage containing one query record."""
        import pydot
        G = pydot.Dot(graph_type='digraph', dpi="{}".format(dpi))  # Directed Graph
        edgeset = set()
        usr_ids = [rec.id for rec in recs]
        for rec in recs:
            if draw_parents:
                edgeset.update(rec.get_all_parent_edges())
            if draw_children:
                edgeset.update(rec.get_all_child_edges())
        lw = self._label_wrap
        rec_id_set = set([rec_id for endpts in edgeset for rec_id in endpts])
        nodes = {str(ID): pydot.Node(
            lw(ID).replace("GO:", ""),  # Node name
            shape="box",
            style="rounded, filled",
            # Highlight query terms in plum:
            fillcolor="beige" if ID not in usr_ids else "plum",
            color=nodecolor)
            for ID in rec_id_set}
        # Nodes must be added explicitly; add_edge alone would miss
        # nodes without at least one edge.
        for node in nodes.values():
            G.add_node(node)
        for src, target in edgeset:
            # default layout in graphviz is top->bottom, so we invert
            # the direction and plot using dir="back"
            G.add_edge(pydot.Edge(nodes[target], nodes[src],
                                  shape="normal",
                                  color=edgecolor,
                                  label="is_a",
                                  dir="back"))
        return G

    def make_graph_pygraphviz(self, recs, nodecolor,
                              edgecolor, dpi,
                              draw_parents=True, draw_children=True):
        # draw AMIGO style network, lineage containing one query record
        import pygraphviz as pgv
        G = pgv.AGraph(name="GO tree")
        edgeset = set()
        for rec in recs:
            if draw_parents:
                edgeset.update(rec.get_all_parent_edges())
            if draw_children:
                edgeset.update(rec.get_all_child_edges())
        edgeset = [(self._label_wrap(a), self._label_wrap(b))
                   for (a, b) in edgeset]
        # add nodes explicitly via add_node
        # adding nodes implicitly via add_edge misses nodes
        # without at least one edge
        for rec in recs:
            G.add_node(self._label_wrap(rec.id))
        for src, target in edgeset:
            # default layout in graphviz is top->bottom, so we invert
            # the direction and plot using dir="back"
            G.add_edge(target, src)
        G.graph_attr.update(dpi="%d" % dpi)
        G.node_attr.update(shape="box", style="rounded,filled",
                           fillcolor="beige", color=nodecolor)
        G.edge_attr.update(shape="normal", color=edgecolor,
                           dir="back", label="is_a")
        # highlight the query terms
        for rec in recs:
            try:
                q = G.get_node(self._label_wrap(rec.id))
                q.attr.update(fillcolor="plum")
            except Exception:  # narrowed from bare except; a missing node is non-fatal
                continue
        return G

    def draw_lineage(self, recs, nodecolor="mediumseagreen",
                     edgecolor="lightslateblue", dpi=96,
                     lineage_img="GO_lineage.png", engine="pygraphviz",
                     gml=False, draw_parents=True, draw_children=True):
        """Render the lineage graph of ``recs`` to an image (and optionally GML)."""
        assert engine in GraphEngines
        if engine == "pygraphviz":
            G = self.make_graph_pygraphviz(recs, nodecolor, edgecolor, dpi,
                                           draw_parents=draw_parents, draw_children=draw_children)
        else:
            G = self.make_graph_pydot(recs, nodecolor, edgecolor, dpi,
                                      draw_parents=draw_parents, draw_children=draw_children)
        if gml:
            import networkx as nx  # use networkx to do the conversion
            pf = lineage_img.rsplit(".", 1)[0]
            NG = nx.from_agraph(G) if engine == "pygraphviz" else nx.from_pydot(G)
            del NG.graph['node']
            del NG.graph['edge']
            gmlfile = pf + ".gml"
            nx.write_gml(NG, gmlfile)
            print("GML graph written to {0}".format(gmlfile), file=sys.stderr)
        print(("lineage info for terms %s written to %s" %
               ([rec.id for rec in recs], lineage_img)), file=sys.stderr)
        if engine == "pygraphviz":
            G.draw(lineage_img, prog="dot")
        else:
            G.write_png(lineage_img)

    def update_association(self, association):
        """Propagate each gene's GO annotations upward to all ancestor terms.

        ``association`` maps a gene key to a *set* of GO ids; each set is
        extended in place with every ancestor of its members.
        """
        bad_terms = set()
        for key, terms in list(association.items()):
            parents = set()
            for term in terms:
                try:
                    parents.update(self[term].get_all_parents())
                except KeyError:  # narrowed from bare except; only unknown ids expected
                    bad_terms.add(term.strip())
            terms.update(parents)
        if bad_terms:
            print("terms not found: %s" % (bad_terms,), file=sys.stderr)
| |
import urlparse
import datetime
import cPickle as pickle
import statsd
import dateutil
from kardboard.app import cache
from kardboard.app import app
from kardboard.util import ImproperlyConfigured, log_exception
from kardboard.tasks import update_ticket
class TicketHelper(object):
    """Base class for adapters that sync kards with an external ticket system.

    Subclasses override the accessor methods below; the base implementations
    are no-op stubs that return None.
    """

    def __init__(self, config, kard):
        # Keep references to the app configuration and the card this
        # helper serves.
        self.app_config = config
        self.card = kard

    def get_title(self, key=None):
        """Return the title of the ticket."""
        pass

    def get_ticket_url(self, key=None):
        """Return a URL to the ticket in the originating system."""
        pass

    def update(self, sync=False):
        """Schedule a refresh of the ticket from its originating system.

        When ``sync`` is True the refresh is expected to run immediately as a
        blocking call; the base implementation only stamps the refresh time.
        """
        self.card._ticket_system_updated_at = datetime.datetime.now()

    def actually_update(self):
        """Perform the refresh; invoked by the scheduled task."""
        pass

    def login(self, username, password):
        """Authenticate a user; a truthy return value means success."""
        pass

    @property
    def type(self):
        """The ticket's type as reported by the upstream system."""
        return self.get_type()

    def get_type(self):
        """Return the upstream ticket type (e.g. Bug, Feature), if any."""
        pass

    def get_version(self):
        """Return the upstream version/fix-version, if any."""
        pass
class NullHelper(TicketHelper):
    """Ticket helper that deliberately does as little as possible."""

    def get_title(self, key=None):
        """Return a static placeholder title."""
        return 'Ticket title'

    def get_ticket_url(self, key=None):
        """Return a URL that intentionally 404s."""
        return '/404'

    def update(self, sync=False):
        """Stamp the update time and store minimal ticket data on the card."""
        super(NullHelper, self).update(sync)
        data = {}
        if self.card._service_class:
            data['service_class'] = self.card._service_class
        self.card._ticket_system_data = data
        return None

    def actually_update(self):
        """No remote system to talk to; nothing to do."""
        return None

    def login(self, username, password):
        """Authentication never succeeds (always None)."""
        return None
class TestTicketHelper(TicketHelper):
    """Ticket helper used by the test suite; serves canned dummy data."""

    def get_title(self, key=None):
        """Return the ticket summary, fetching synchronously on a cache miss."""
        title = ''
        if self.card._ticket_system_data:
            title = self.card._ticket_system_data.get('summary', '')
        else:
            self.card.ticket_system.update(sync=True)
            title = self.card.ticket_system_data.get('summary', '')
        return title

    def get_ticket_url(self, key=None):
        """Return a dummy URL for a ticket.

        Fix: the base class declares ``get_ticket_url(self, key=None)``; this
        override previously dropped the parameter, breaking substitutability.
        ``key`` defaults to this card's key, preserving old behavior.
        """
        key = key or self.card.key
        return u"""http://example.com/ticket/%s""" % key

    def update(self, sync=False):
        """Store dummy ticket data on the card (forwarding ``sync`` to the base)."""
        super(TestTicketHelper, self).update(sync)
        test_data = {
            'summary': u"""Dummy Title from Dummy Ticket System""",
        }
        if self.card._service_class:
            test_data['service_class'] = self.card._service_class
        self.card._ticket_system_data = test_data

    def login(self, username, password):
        """Authentication always succeeds in tests."""
        return True

    def get_version(self):
        """Return the card's stored version, or None without a card."""
        if self.card:
            return self.card._version
        else:
            return None
class JIRAHelper(TicketHelper):
    """Ticket helper backed by JIRA's SOAP (WSDL) API via suds."""

    # suds Clients are expensive to build (WSDL fetch + parse) and this code
    # runs inside a long-lived celeryd process, so clients are shared per
    # WSDL URL across all instances of this class.
    clients = {}

    def __init__(self, config, kard):
        super(JIRAHelper, self).__init__(config, kard)
        self.logger = app.logger
        self.testing = app.config.get('TESTING')
        self.statsd = app.statsd.get_client('tickethelpers.JIRAHelper')
        self.issues = {}  # per-instance cache of fetched issues, keyed by ticket key
        self._service = None
        try:
            self.wsdl_url = self.app_config['JIRA_WSDL']
        except KeyError:
            raise ImproperlyConfigured("You must provide a JIRA_WSDL setting")
        try:
            self.username, self.password = self.app_config['JIRA_CREDENTIALS']
        except KeyError:
            raise ImproperlyConfigured(
                "You must provide a JIRA_CREDENTIALS setting")
        except ValueError:
            raise ImproperlyConfigured(
                "JIRA_CREDENTIALS should be a two-item tuple (user, pass)")

    @property
    def cache_prefix(self):
        # Scoped by WSDL URL so multiple JIRA instances don't share cache keys.
        return "jira_%s" % self.wsdl_url

    @property
    def service(self):
        """The lazily-connected SOAP service proxy."""
        if self._service is None:
            self.connect()
        return self._service

    def connect(self):
        """Build (or reuse) the suds client and authenticate, caching the token."""
        auth_key = "offline_auth_%s" % self.cache_prefix
        auth = cache.get(auth_key)
        client = self.clients.get(self.wsdl_url, None)
        if not client:
            from suds.client import Client
            client = Client(self.wsdl_url)
            # We cache the client because there's major overhead in
            # instantiating and the initial connection, and since this mostly
            # is run by a long running celeryd process a simple in-memory
            # cache suffices.
            self.clients[self.wsdl_url] = client
        if not auth:
            self.logger.warn("Cache miss for %s" % auth_key)
            auth = client.service.login(self.username, self.password)
            cache.set(auth_key, auth, 60 * 60)  # Cache for an hour
        self.auth = auth
        self._service = client.service

    def login(self, username, password):
        """Return a JIRA auth token for the credentials, or False on failure."""
        if not self._service:
            self.connect()
        try:
            auth = self._service.login(username, password)
            return auth
        except Exception:  # narrowed from bare except; any SOAP fault means failure
            return False

    def get_issue(self, key=None):
        """Fetch (and memoize) the raw suds issue for ``key`` (default: this card)."""
        key = key or self.card.key
        if self.issues.get(key, None):
            return self.issues.get(key)
        issue = self.service.getIssue(self.auth, key)
        self.issues[key] = issue
        return issue

    def get_title(self, key=None):
        """Return the issue summary, syncing from JIRA on first access."""
        title = ''
        if not self.card._ticket_system_data:
            self.card.ticket_system.update(sync=True)
        title = self.card.ticket_system_data.get('summary', '')
        return title

    def get_version(self, key=None):
        """Return the first fixVersion name, or '' when none is set."""
        version = ''
        if not self.card._ticket_system_data:
            self.card.ticket_system.update(sync=True)
        versions = self.card.ticket_system_data.get('fixVersions', [])
        if versions:
            try:
                version = versions[0]['name']
            except IndexError:
                print(versions)  # fixed: was a Python-2-only print statement
                raise
        return version

    def get_type(self, key=None):
        """Return the issue type name (e.g. Bug), stripped of whitespace."""
        ticket_type = None
        if not self.card._ticket_system_data:
            self.card.ticket_system.update(sync=True)
        ticket_type = self.card._ticket_system_data.get('type', {}).get('name', '')
        # Fixed: str.strip() returns a new string; its result was previously
        # discarded, so types were never actually stripped.
        ticket_type = ticket_type.strip()
        return ticket_type

    def get_service_class(self):
        """Return the stored service class, syncing from JIRA on first access."""
        service_class = None
        if not self.card._ticket_system_data:
            self.card.ticket_system.update(sync=True)
        service_class = self.card._ticket_system_data.get('service_class', None)
        return service_class

    def get_due_date(self):
        """Return the stored due date, syncing from JIRA on first access."""
        due_date = None
        if not self.card._ticket_system_data:
            self.card.ticket_system.update(sync=True)
        due_date = self.card._ticket_system_data.get('due_date', None)
        return due_date

    def issue_to_dictionary(self, obj):
        """Convert a suds issue object into a plain dict of primitive values."""
        idic = {}
        keys = ['summary', 'key', 'reporter', 'assignee', 'description',
                'status', 'type', 'updated', 'fixVersions', 'created', 'resolution']
        for key in keys:
            idic[key] = getattr(obj, key)
        # Resolve JIRA's numeric ids into their full dict representations.
        idic['status'] = self.resolve_status(idic['status'])
        idic['type'] = self.resolve_type(idic['type'])
        idic['resolution'] = self.resolve_resolution(idic['resolution'])
        idic['fixVersions'] = [self.object_to_dict(v) for v in idic['fixVersions']]
        return idic

    def object_to_dict(self, obj):
        """Shallow-copy an object's public attributes into a dict."""
        keys = [k for k in dir(obj) if not k.startswith("_")]
        dic = dict([(key, getattr(obj, key)) for key in keys])
        return dic

    def resolve_resolution(self, resolution_id):
        """Map a JIRA resolution id to its full dict, caching the resolution list."""
        key = "%s_resolutions" % self.cache_prefix
        resolutions = cache.get(key)
        if resolutions:
            try:
                resolutions = pickle.loads(resolutions)
            except pickle.UnpicklingError:
                resolutions = None
        if not resolutions:
            self.logger.warn("Cache miss for %s" % key)
            resolutions = self.service.getResolutions()
            resolutions = [self.object_to_dict(r) for r in resolutions]
            cache.set(key, pickle.dumps(resolutions))
        resolution = [r for r in resolutions if r.get('id') == resolution_id]
        try:
            return resolution[0]
        except IndexError:
            self.logger.warn("Couldn't find resolution_id: %s in %s" %
                             (resolution_id, resolutions))
            return {}

    def resolve_status(self, status_id):
        """Map a JIRA status id to its full dict, caching the status list."""
        key = "%s_statuses" % self.cache_prefix
        statuses = cache.get(key)
        if statuses:
            try:
                statuses = pickle.loads(statuses)
            except pickle.UnpicklingError:
                statuses = None
        if not statuses:
            self.logger.warn("Cache miss for %s" % key)
            statuses = self.service.getStatuses()
            statuses = [self.object_to_dict(s) for s in statuses]
            cache.set(key, pickle.dumps(statuses))
        status = [s for s in statuses if s.get('id') == status_id]
        try:
            return status[0]
        except IndexError:
            self.logger.warn("Couldn't find status_id: %s in %s" %
                             (status_id, statuses))
            return {}

    def resolve_type(self, type_id):
        """Map a JIRA type id to its full dict, caching the type+subtask list.

        Fix: a stray ``the_types = None`` after the cache read used to defeat
        the cache entirely, refetching the type list from JIRA on every call.
        """
        key = "%s_issue_types_and_subtasks" % self.cache_prefix
        the_types = cache.get(key)
        if the_types:
            try:
                the_types = pickle.loads(the_types)
            except pickle.UnpicklingError:
                the_types = None
        if the_types is None:
            self.logger.warn("Cache miss for %s" % key)
            the_types = self.service.getIssueTypes()
            the_types = [self.object_to_dict(t) for t in the_types]
            the_subtask_types = self.service.getSubTaskIssueTypes()
            the_subtask_types = [self.object_to_dict(st) for st in the_subtask_types]
            the_types.extend(the_subtask_types)
            cache.set(key, pickle.dumps(the_types))
        the_type = [t for t in the_types if t['id'] == type_id]
        try:
            return the_type[0]
        except IndexError:
            type_help = ["%s -- %s" % (t['id'], t['name'])
                         for t in the_types]
            self.logger.warn("Couldn't find type_id: %s in %s" %
                             (type_id, type_help))
            return {}

    def update(self, issue=None, sync=False):
        """Refresh card data from JIRA; async via celery unless sync or first fetch."""
        if self.card._ticket_system_data and self.card.id:
            if sync:
                self.actually_update(issue)
            else:
                update_ticket.apply_async((self.card.id,))
        else:
            # first fetch
            self.actually_update(issue)

    def _get_custom_field_values(self, field_id, fields):
        """Return the values of custom field ``field_id``, or None if absent."""
        for field in fields:
            if field.customfieldId == 'customfield_%s' % field_id:
                return field.values
        return None

    def id_devs(self, issue):
        """Return the unique developers recorded on the issue (custom field)."""
        developer_id = 10210
        custom_fields = issue.customFieldValues
        devs = self._get_custom_field_values(developer_id, custom_fields) or []
        return list(set(devs))

    def id_service_class(self, issue):
        """Return the issue's service class (custom field), or None."""
        service_class_id = 10321
        custom_fields = issue.customFieldValues
        service_class = self._get_custom_field_values(service_class_id, custom_fields) or []
        try:
            return service_class[0]
        except IndexError:
            return None

    def _parse_date_string(self, datestring):
        """Parse a JIRA date string into a datetime."""
        return dateutil.parser.parse(datestring)

    def id_due_date(self, issue):
        """Return the issue's due date (custom field) as a datetime, or None."""
        due_date_id = 10322
        custom_fields = issue.customFieldValues
        due_date_value = self._get_custom_field_values(due_date_id, custom_fields) or []
        try:
            due_date_value = due_date_value[0]
        except IndexError:
            due_date_value = None
        if hasattr(due_date_value, "startswith"):
            # It's a string, thanks JIRA :-/
            due_date_value = self._parse_date_string(due_date_value)
        return due_date_value

    def id_testers(self, issue):
        """Return the unique QA resources recorded on the issue (custom field)."""
        qa_resource_id = 10133
        custom_fields = issue.customFieldValues
        qaers = self._get_custom_field_values(qa_resource_id, custom_fields)
        if not qaers:
            return []
        return list(set(qaers))

    def update_state(self, card):
        """Auto-move the card's state (and stamp dates) per TICKET_STATE_MAPPING."""
        mappings = app.config.get('TICKET_STATE_MAPPING', {})
        if not mappings:
            return card  # Short circuit
        current_ticket_status = \
            card._ticket_system_data.get(u'status', {}).get(u'name', '')
        current_resolution = card._ticket_system_data.get(u'resolution', {}).get(u'name', '')
        mapping = mappings.get(current_ticket_status, (None, None, None))
        state, datefield = mapping[0], mapping[1]
        resolution = None
        if len(mapping) >= 3:
            resolution = mapping[2]
        state_changed = False
        if state:
            oldstate = card.state
            if card.state != state:
                # Only move when either no resolution is required by the
                # mapping, or the ticket's resolution matches it.
                if resolution is None or resolution is not None and current_resolution == resolution:
                    state_changed = True
                    card.state = state
                    resolution_msg = ""
                    if resolution is not None:
                        resolution_msg = "and resolution was %s" % current_resolution
                    self.logger.info(
                        "AUTOMOVE: %s state moved %s => %s because status was %s %s" % (self.card.key,
                                                                                        oldstate, card.state, current_ticket_status, resolution_msg))
        if datefield and state_changed is True:
            current_value = getattr(card, datefield)
            if not current_value:
                setattr(card, datefield, datetime.datetime.now())
                self.logger.info(
                    "AUTOMOVE DATE: %s %s set to %s because status was %s" % (self.card.key,
                                                                              datefield, getattr(card, datefield), current_ticket_status))
        return card

    def actually_update(self, issue=None):
        """Fetch the issue from JIRA, refresh the card's data and save it."""
        statsd_conn = self.statsd.get_client('actually_update')
        counter = statsd_conn.get_client(class_=statsd.Counter)
        timer = statsd_conn.get_client(class_=statsd.Timer)
        timer.start()
        counter += 1
        super(JIRAHelper, self).update()
        if not issue:
            self.logger.info("Fetching JIRA data for %s" % self.card.key)
            try:
                issue = self.get_issue(self.card.key)
            except Exception:
                issue = None
                log_exception("Couldn't fetch JIRA issue %s" % self.card.key)
        if issue:
            issue_dict = self.issue_to_dictionary(issue)
            # TODO: This is super specific to CMG's JIRA setup. Fixme.
            issue_dict['developers'] = self.id_devs(issue)
            issue_dict['testers'] = self.id_testers(issue)
            issue_dict['service_class'] = self.id_service_class(issue)
            issue_dict['due_date'] = self.id_due_date(issue)
        elif self.card._ticket_system_data:
            # Fetch failed but we already have data: keep the stale copy.
            return None
        else:
            # We want to ensure there's at least an empty dict
            issue_dict = {}
            return None
        now = datetime.datetime.now()
        self.card._ticket_system_data = issue_dict
        self.card._ticket_system_updated_at = now
        self.card.created_at = issue_dict['created']
        self.card = self.update_state(self.card)
        if self.card.id:
            self.card.save()
            self.card.reload()
        self.logger.info(
            "%s updated at %s" % (self.card.key,
                                  self.card._ticket_system_updated_at))
        timer.stop()

    def get_ticket_url(self, key=None):
        """Return the browse URL for a ticket on this JIRA instance."""
        key = key or self.card.key
        parsed_url = urlparse.urlparse(self.wsdl_url)
        browse_url_parts = [
            parsed_url.scheme,
            '://',
            parsed_url.netloc,
            '/browse/',
            key,
        ]
        return ''.join(browse_url_parts)
| |
#!/usr/bin/env python
# Line too long - pylint: disable=C0301
# Invalid name - pylint: disable=C0103
"""
parseutils.py
Routines to parse "flexible" configuration files for tools like
gpaddmirrors, gprecoverseg, gpexpand, etc.
Copyright (c) EMC/Greenplum Inc 2011. All Rights Reserved.
"""
import sys
from gppylib.mainUtils import ExceptionNoStackTraceNeeded
from gppylib.gplog import get_default_logger, logging_is_verbose
logger = get_default_logger()
def caller():
    "Return name of calling function"
    # Frame introspection is only worth the cost when verbose logging is on.
    if not logging_is_verbose():
        return ''
    return sys._getframe(1).f_code.co_name + '()'
def canonicalize_address(addr):
    """
    Encases addresses in [ ] per RFC 2732. Generally used to deal with ':'
    characters which are also often used as delimiters.
    Returns the addr string if it doesn't contain any ':' characters.
    If addr contains ':' and also contains a '[' then the addr string is
    simply returned under the assumption that it is already escaped as needed.
    Otherwise return a new string from addr by adding '[' prefix and ']' suffix.

    Examples
    --------
    >>> canonicalize_address('myhost')
    'myhost'
    >>> canonicalize_address('127.0.0.1')
    '127.0.0.1'
    >>> canonicalize_address('::1')
    '[::1]'
    >>> canonicalize_address('[::1]')
    '[::1]'
    >>> canonicalize_address('2620:0:170:610::13')
    '[2620:0:170:610::13]'
    >>> canonicalize_address('[2620:0:170:610::13]')
    '[2620:0:170:610::13]'

    @param addr: the address to possibly encase in [ ]
    @returns: the addresss, encased in [] if necessary
    """
    needs_brackets = ':' in addr and '[' not in addr
    return '[' + addr + ']' if needs_brackets else addr
#
# line parsing
#
def consume_to(delimiter, rest):
    """
    Consume characters from rest string until we encounter the delimiter.
    Returns (None, after, None) where after are the characters after delimiter
    or (None, rest, 'does not contain '+delimiter) when delimiter is not encountered.

    Examples
    --------
    >>> consume_to('=', 'abc=def:ghi')
    (None, 'def:ghi', None)

    @param delimiter: the delimiter string
    @param rest: the string to read such as 'abc:def:ghi'
    @returns: (None, after, None) tuple such as (None, 'def:ghi', None)
    """
    p = rest.find(delimiter)
    if p < 0:
        return None, rest, 'does not contain '+delimiter
    # Fix: skip the full delimiter, not just one character, so that
    # multi-character delimiters are handled correctly.
    return None, rest[p + len(delimiter):], None
def read_to(delimiter, rest):
    """
    Read characters from rest string until we encounter the delimiter.
    Separate the string into characters 'before' and 'after' the delimiter.
    If no delimiter is found, assign entire string to 'before' and None to 'after'.

    Examples
    --------
    >>> read_to(':', 'abc:def:ghi')
    ('abc', 'def:ghi', None)
    >>> read_to(':', 'abc:def')
    ('abc', 'def', None)
    >>> read_to(':', 'abc')
    ('abc', None, None)
    >>> read_to(':', '')
    ('', None, None)

    Note this returns a 3-tuple for compatibility with other routines
    which use the third element as an error message

    @param delimiter: the delimiter string
    @param rest: the string to read such as 'abc:def:ghi'
    @returns: (before, after, None) tuple such as ('abc', 'def:ghi', None)
    """
    p = rest.find(delimiter)
    if p < 0:
        return rest, None, None
    # Fix: skip the full delimiter, not just one character, so that
    # multi-character delimiters are handled correctly.
    return rest[0:p], rest[p + len(delimiter):], None
def read_to_bracketed(delimiter, rest):
    """
    Read characters from rest string which is expected to start with a '['.
    If rest does not start with '[', return a tuple (None, rest, 'does not begin with [').
    If rest string starts with a '[', then read until we find ']'.
    If no ']' is found, return a tuple (None, rest, 'does not contain ending ]').
    Otherwise separate the string into 'before' representing characters between
    '[' and ']' and 'after' representing characters after the ']' and then check
    that the first character found after the ']' is the delimiter.
    If there are no characters after the ']', return a tuple (before, None, None)
    where before contains the characters between '[' and ']'.
    If there are characters after ']' other than the delimiter, return a tuple
    (None, rest, 'characters not allowed after ending ]')
    Otherwise return a tuple (before, after, None) where before contains the
    characters between '[' and ']' and after contains the characters after the
    ']' and delimiter.
    This function avoids raising Exceptions for these particular cases of
    malformed input since they are easier to report in the calling function.

    Examples
    --------
    >>> read_to_bracketed(':', '[abc:def]')
    ('abc:def', None, None)
    >>> read_to_bracketed(':', '[abc]:def:ghi')
    ('abc', 'def:ghi', None)
    >>> read_to_bracketed(':', '[abc:def]:ghi:jkl')
    ('abc:def', 'ghi:jkl', None)
    >>> read_to_bracketed(':', 'abc:def:ghi:jkl')
    (None, 'abc:def:ghi:jkl', 'does not begin with [')
    >>> read_to_bracketed(':', '[abc:def:ghi:jkl')
    (None, '[abc:def:ghi:jkl', 'does not contain ending ]')
    >>> read_to_bracketed(':', '[abc]extra:def:ghi:jkl')
    (None, '[abc]extra:def:ghi:jkl', 'characters not allowed after ending ]')

    @param delimiter: the delimiter string
    @param rest: the string to read such as '[abc:def]:ghi'
    @returns: (before, after, reason) tuple such as ('abc:def', 'ghi', None)
    """
    if not rest.startswith('['):
        return None, rest, 'does not begin with ['
    p = rest.find(']')
    if p < 0:
        return None, rest, 'does not contain ending ]'
    after = rest[p+1:]
    if not after:
        return rest[1:p], None, None
    # Fix: compare (and skip) the full delimiter rather than assuming it is a
    # single character, so multi-character delimiters are handled correctly.
    if not after.startswith(delimiter):
        return None, rest, 'characters not allowed after ending ]'
    return rest[1:p], after[len(delimiter):], None
def read_to_possibly_bracketed(delimiter, rest):
    """
    Behave as read_to_bracketed above when rest starts with a '[',
    otherwise as read_to. This is intended to support fields
    which may contain an IPv6 address, an IPv4 address or a hostname.

    Examples
    --------
    >>> read_to_possibly_bracketed(':', 'abc:def:ghi')
    ('abc', 'def:ghi', None)
    >>> read_to_possibly_bracketed(':', '[abc]:def:ghi')
    ('abc', 'def:ghi', None)
    >>> read_to_possibly_bracketed(':', '[abc:def]:ghi')
    ('abc:def', 'ghi', None)
    >>> read_to_possibly_bracketed(':', '[]:ghi')
    ('', 'ghi', None)
    >>> read_to_possibly_bracketed(':', ':ghi')
    ('', 'ghi', None)
    >>> read_to_possibly_bracketed(':', '[ghi]')
    ('ghi', None, None)
    >>> read_to_possibly_bracketed(':', '[]')
    ('', None, None)
    >>> read_to_possibly_bracketed(':', '')
    ('', None, None)

    @param delimiter: the delimiter string
    @param rest: the string to read such as '[abc:def]:ghi'
    @returns: (before, after, reason) tuple such as ('abc:def', 'ghi', None)
    """
    reader = read_to_bracketed if rest.startswith('[') else read_to
    return reader(delimiter, rest)
class LineParser:
    """
    Stateful parser for a single configuration line, generally one whose
    fields are delimited by colons. Tracks position via ``self.rest`` and
    reports errors annotated with file, line number and calling function.
    """

    def __init__(self, caller, filename, lineno, line):
        "Record context for error messages and start parsing at the full line."
        self.caller = caller
        self.filename = filename
        self.lineno = lineno
        self.line = line
        self.rest = line
        self.error = None
        self.logger = logger
        if logging_is_verbose():
            self.logger.debug("%s:%s" % (filename, lineno))

    def _die(self, msg):
        "Raise a parse failure annotated with file, line and caller context."
        raise ExceptionNoStackTraceNeeded("%s:%s:%s LINE >>%s\n%s" % (self.filename, self.lineno, self.caller, self.line, msg))

    def ensure_more_to_process(self, name):
        "Raise an exception if we've exhausted the input line"
        if self.rest is None:
            self._die("out of values (reading %s)" % name)

    def read_delimited_field(self, delimiter, name="next field", reader=read_to):
        """
        Attempts to read the next field in the line up to the specified delimiter
        using the specified reading method, raising any error encountered as an
        exception. Returns the read field when successful.
        """
        self.ensure_more_to_process(name)
        value, self.rest, error = reader(delimiter, self.rest)
        if error is not None:
            self._die("%s (reading %s) >>%s" % (error, name, self.rest))
        if logging_is_verbose():
            self.logger.debug("    name=%-30s delimiter='%s' value=%s" % (name, delimiter, value))
        return value

    def ensure_starts_with(self, expected):
        "Consume the expected prefix from the line, or raise an exception if absent."
        if not self.line.startswith(expected):
            self._die("does not start with %s" % expected)
        self.rest = self.rest[len(expected):]

    def handle_field(self, name, dst=None, delimiter=':', stripchars=None):
        """
        Attempts to read the next field up to a given delimiter.
        Names starting with '[' indicate that the field should use the bracketed parsing logic.
        If dst is not none, also assigns the value to dst[name].
        If stripchars is not none, value is first stripped of leading and trailing stripchars.
        """
        if name[0] == '[':
            name = name.strip('[]')
            value = self.read_delimited_field(delimiter, name, read_to_possibly_bracketed)
        else:
            value = self.read_delimited_field(delimiter, name)
        if stripchars is not None:
            value = value.strip(stripchars)
        if dst is not None:
            dst[name] = value
        return value
#
# file parsing
#
def line_reader(f):
    """
    Read the contents of the given input, generating the non-blank non-comment
    lines found within as a series of tuples of the form (line number, line).
    >>> [l for l in line_reader(['', '# test', 'abc:def'])]
    [(3, 'abc:def')]
    """
    for lineno, raw in enumerate(f, 1):
        stripped = raw.strip()
        # Skip blank lines and full-line comments.
        if stripped and not stripped.startswith('#'):
            yield lineno, stripped
################
# gpfilespace format
#
# First line in file is the filespace name, remaining lines are
# specify hostname, dbid, and a path:
#
# filespace:name
# hostname:dbid:path
# ...
################
def parse_fspacename(filename, lineno, line):
    """
    Parse the filespace: line which appears at the beginning of the gpfilespace configuration file.
    >>> parse_fspacename('file', 1, 'filespace:blah')
    'blah'
    """
    parser = LineParser(caller(), filename, lineno, line)
    parser.ensure_starts_with('filespace:')
    fspacename = parser.read_delimited_field(':')
    if parser.rest is None:
        return fspacename
    msg = "unexpected characters after filespace name >>%s" % parser.rest
    raise ExceptionNoStackTraceNeeded("%s:%s:%s LINE >>%s\n%s" % (filename, lineno, caller(), line, msg))
def parse_gpfilespace_line(filename, lineno, line):
    """
    Parse a line of the gpfilespace configuration file other than the first.
    >>> parse_gpfilespace_line('file', 1, '[::1]:dbid:path')
    ('::1', 'dbid', 'path')
    >>> parse_gpfilespace_line('file', 1, 'host:dbid:path')
    ('host', 'dbid', 'path')
    """
    parser = LineParser(caller(), filename, lineno, line)
    # '[host]' tells handle_field the value may be a bracketed IPv6 address.
    fields = [parser.handle_field(spec) for spec in ('[host]', 'dbid', 'path')]
    if parser.rest is not None:
        msg = "unexpected characters after path name >>%s" % parser.rest
        raise ExceptionNoStackTraceNeeded("%s:%s:%s LINE >>%s\n%s" % (filename, lineno, caller(), line, msg))
    host, dbid, path = fields
    return host, dbid, path
################
# gpexpand segment file format:
#
# Form of file is hostname:address:port:dtadir:dbid:contentId:role[:replicationPort]
################
def parse_gpexpand_segment_line(filename, lineno, line):
    """
    Parse a line of the gpexpand configuration file.
    >>> parse_gpexpand_segment_line('file', 1, "localhost:[::1]:40001:/Users/ctaylor/data/p2/gpseg1:4:1:p")
    ('localhost', '::1', '40001', '/Users/ctaylor/data/p2/gpseg1', '4', '1', 'p', None)
    >>> parse_gpexpand_segment_line('file', 1, "localhost:[::1]:40001:/Users/ctaylor/data/p2/gpseg1:4:1:p:41001")
    ('localhost', '::1', '40001', '/Users/ctaylor/data/p2/gpseg1', '4', '1', 'p', '41001')
    """
    parser = LineParser(caller(), filename, lineno, line)
    # Bracketed field names ('[host]', '[address]') tell the parser the value
    # may be a bracketed IPv6 address.
    required = [parser.handle_field(name) for name in
                ('[host]', '[address]', 'port', 'datadir', 'dbid',
                 'contentId', 'role')]
    # The replication port is optional: consume it only if anything remains
    # on the line after the role field.
    replicationPort = None
    if parser.rest is not None:
        replicationPort = parser.handle_field('replicationPort')
    # After the optional replication port the line must be exhausted.
    if parser.rest is not None:
        msg = "unexpected characters after replicationPort >>%s" % parser.rest
        raise ExceptionNoStackTraceNeeded("%s:%s:%s LINE >>%s\n%s" % (filename, lineno, caller(), line, msg))
    return tuple(required) + (replicationPort,)
################
# gpaddmirrors format:
#
# filespaceOrder=[filespace1_fsname[:filespace2_fsname:...]]
# mirror[content]=content:address:port:mir_replication_port:pri_replication_port:fselocation[:fselocation:...]
#
################
def parse_filespace_order(filename, lineno, line):
    """
    Parse the filespaceOrder= line appearing at the beginning of the
    gpaddmirrors, gpmovemirrors and gprecoverseg configuration files,
    returning the (possibly empty) list of filespace names.

    >>> parse_filespace_order('file', 1, "filespaceOrder=fs1:fs2:fs3")
    ['fs1', 'fs2', 'fs3']
    >>> parse_filespace_order('file', 1, "filespaceOrder=")
    []
    """
    parser = LineParser(caller(), filename, lineno, line)
    parser.ensure_starts_with('filespaceOrder=')
    names = []
    # Consume ':'-delimited filespace names until the line is exhausted.
    while parser.rest:
        names.append(parser.read_delimited_field(':', 'next filespace'))
    return names
def parse_gpaddmirrors_line(filename, lineno, line, fslist):
    """
    Parse a line in the gpaddmirrors configuration file other than the first.
    >>> line = "mirror0=0:[::1]:40001:50001:60001:/Users/ctaylor/data/p2/gpseg1"
    >>> fixed, flex = parse_gpaddmirrors_line('file', 1, line, [])
    >>> fixed["address"], fixed["contentId"], fixed["dataDirectory"]
    ('::1', '0', '/Users/ctaylor/data/p2/gpseg1')

    Returns a (fixed, flexible) pair of dicts: *fixed* holds the standard
    mirror fields named below, and *flexible* maps each filespace name in
    *fslist* to the corresponding parsed location.
    """
    fixed = {}
    flexible = {}
    p = LineParser(caller(), filename, lineno, line)
    p.ensure_starts_with('mirror')
    # Skip the 'mirror<content>=' prefix up to the '=' delimiter.
    # NOTE(review): 'consume_to' is not defined anywhere in this view of the
    # file -- confirm it exists at module scope, otherwise this raises
    # NameError at runtime.
    p.read_delimited_field('=', 'content id', consume_to)
    # [address] indicates possible IPv6 address
    for field in [ 'contentId', '[address]', 'port', 'replicationPort', 'primarySegmentReplicationPort', 'dataDirectory' ]:
        p.handle_field(field, fixed)
    # Remaining fields are filespace locations, in the declared order.
    for fsname in fslist:
        p.handle_field(fsname, flexible)
    return fixed, flexible
################
# gpmovemirrors format:
#
# This is basically the same as the gprecoverseg format (since gpmovemirrors ultimately just
# passes the input file after validating it) but the field names are slightly different.
#
# filespaceOrder=[filespace1_fsname[:filespace2_fsname:...]
# old_address:port:datadir new_address:port:replication_port:datadir[:fselocation:...]
# ^
# note space
################
def parse_gpmovemirrors_line(filename, lineno, line, fslist):
    """
    Parse a line in the gpmovemirrors configuration file other than the first.
    >>> line = "[::1]:40001:/Users/ctaylor/data/m2/gpseg1 [::2]:40101:50101:/Users/ctaylor/data/m2/gpseg1:/fs1"
    >>> fixed, flex = parse_gpmovemirrors_line('file', 1, line, ['fs1'])
    >>> fixed["oldAddress"], fixed["newAddress"]
    ('::1', '::2')
    >>> flex
    {'fs1': '/fs1'}
    """
    # The line must contain exactly two space-separated groups: the old
    # mirror description and the new one.
    group_count = len(line.split())
    if group_count != 2:
        msg = "need two groups of fields delimited by a space for old and new mirror, not %d" % group_count
        raise ExceptionNoStackTraceNeeded("%s:%s:%s LINE >>%s\n%s" % (filename, lineno, caller(), line, msg))
    fixed = {}
    flexible = {}
    parser = LineParser(caller(), filename, lineno, line)
    # Bracketed field names may hold bracketed IPv6 addresses.
    parser.handle_field('[oldAddress]', fixed)
    parser.handle_field('oldPort', fixed)
    # MPP-15675: the old data directory ends at the separating space; strip
    # surrounding whitespace here and on the field that follows it.
    parser.handle_field('oldDataDirectory', fixed, delimiter=' ', stripchars=' \t')
    parser.handle_field('[newAddress]', fixed, stripchars=' \t')
    parser.handle_field('newPort', fixed)
    parser.handle_field('newReplicationPort', fixed)
    parser.handle_field('newDataDirectory', fixed)
    # Trailing fields are the filespace locations, in declared order.
    for fsname in fslist:
        parser.handle_field(fsname, flexible)
    if parser.rest is not None:
        msg = "unexpected characters after mirror fields >>%s" % parser.rest
        raise ExceptionNoStackTraceNeeded("%s:%s:%s LINE >>%s\n%s" % (filename, lineno, caller(), line, msg))
    return fixed, flexible
################
# gprecoverseg format:
#
# filespaceOrder=[filespace1_fsname[:filespace2_fsname:...]]
# failed_host_address:port:datadir [recovery_host_address:port:replication_port:datadir[:fselocation:...]]
# ^
# note space
#
# filespace locations are only present at the end of the other fields when there
# are two groups of fields separated by a space. If there is only one group of
# fields then we assume the entire line is only three fields as below with no
# filespace locations:
#
# failed_host_address:port:datadir
################
def parse_gprecoverseg_line(filename, lineno, line, fslist):
    """
    Parse a line in the gprecoverseg configuration file other than the first.
    >>> line = "[::1]:40001:/Users/ctaylor/data/m2/gpseg1"
    >>> fixed, flex = parse_gprecoverseg_line('file', 1, line, [])
    >>> fixed["failedAddress"], fixed["failedPort"], fixed["failedDataDirectory"]
    ('::1', '40001', '/Users/ctaylor/data/m2/gpseg1')
    >>> line = "[::1]:40001:/Users/ctaylor/data/m2/gpseg1 [::2]:40101:50101:/Users/ctaylor/data/m2/gpseg1:/fs1"
    >>> fixed, flex = parse_gprecoverseg_line('file', 1, line, ['fs1'])
    >>> fixed["newAddress"], fixed["newPort"], fixed["newReplicationPort"], fixed["newDataDirectory"]
    ('::2', '40101', '50101', '/Users/ctaylor/data/m2/gpseg1')
    >>> flex
    {'fs1': '/fs1'}
    """
    # One space-separated group describes only the failed segment; two groups
    # add the recovery segment.  Anything else is malformed.
    group_count = len(line.split())
    if group_count not in [1, 2]:
        msg = "only one or two groups of fields delimited by a space, not %d" % group_count
        raise ExceptionNoStackTraceNeeded("%s:%s:%s LINE >>%s\n%s" % (filename, lineno, caller(), line, msg))
    fixed = {}
    flexible = {}
    parser = LineParser(caller(), filename, lineno, line)
    # Bracketed field names may hold bracketed IPv6 addresses.
    parser.handle_field('[failedAddress]', fixed)
    parser.handle_field('failedPort', fixed)
    if group_count == 1:
        # No recovery group: the remainder of the line is the data directory.
        parser.handle_field('failedDataDirectory', fixed)
    else:
        # MPP-15675: the failed data directory ends at the separating space;
        # strip surrounding whitespace here and on the field that follows it.
        parser.handle_field('failedDataDirectory', fixed, delimiter=' ', stripchars=' \t')
        parser.handle_field('[newAddress]', fixed, stripchars=' \t')
        parser.handle_field('newPort', fixed)
        parser.handle_field('newReplicationPort', fixed)
        parser.handle_field('newDataDirectory', fixed)
    # Filespace locations follow the fixed fields (only present in practice
    # when the line has two groups).
    for fsname in fslist:
        parser.handle_field(fsname, flexible)
    return fixed, flexible
if __name__ == '__main__':
    # Running this module directly executes the doctests embedded in the
    # docstrings above.
    import doctest
    doctest.testmod()
# (removed: stray dataset-viewer boilerplate accidentally appended to this
# file; it was not valid Python and broke parsing of the module)