gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras metrics serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import layers
from tensorflow.python.keras import metrics
from tensorflow.python.keras import optimizer_v2
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
# Custom metric
class MyMeanAbsoluteError(metrics.MeanMetricWrapper):
def __init__(self, name='my_mae', dtype=None):
super(MyMeanAbsoluteError, self).__init__(_my_mae, name, dtype=dtype)
# Custom metric function
def _my_mae(y_true, y_pred):
return keras.backend.mean(math_ops.abs(y_pred - y_true), axis=-1)
def _get_multi_io_model():
  """Build a two-input/two-output functional model sharing one Bias layer."""
  first_input = layers.Input(shape=(1,), name='input_1')
  second_input = layers.Input(shape=(1,), name='input_2')
  shared_bias = testing_utils.Bias(name='output')
  outputs = [shared_bias(first_input), shared_bias(second_input)]
  return keras.Model([first_input, second_input], outputs)
# Run each test across graph/eager modes, crossed with every supported way
# of specifying metrics in Model.compile(): strings, built-in
# functions/classes, custom functions/classes, and (possibly nested) lists
# and dicts thereof.
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
    dict(testcase_name='string', value=['mae']),
    dict(testcase_name='built_in_fn', value=[metrics.mae]),
    dict(testcase_name='built_in_class', value=[metrics.MeanAbsoluteError]),
    dict(testcase_name='custom_fn', value=[_my_mae]),
    dict(testcase_name='custom_class', value=[MyMeanAbsoluteError]),
    dict(
        testcase_name='list_of_built_in_fn_and_list',
        value=[metrics.mae, [metrics.mae]]),
    dict(
        testcase_name='list_of_built_in_class_and_list',
        value=[metrics.MeanAbsoluteError, [metrics.MeanAbsoluteError]]),
    dict(
        testcase_name='list_of_custom_fn_and_list',
        value=[_my_mae, [_my_mae]]),
    dict(
        testcase_name='list_of_custom_class_and_list',
        value=[MyMeanAbsoluteError, [MyMeanAbsoluteError]]),
    dict(
        testcase_name='list_of_lists_of_custom_fns',
        value=[[_my_mae], [_my_mae, 'mae']]),
    dict(
        testcase_name='list_of_lists_of_custom_classes',
        value=[[MyMeanAbsoluteError], [MyMeanAbsoluteError, 'mae']]),
    dict(
        testcase_name='dict_of_list_of_string',
        value={
            'output': ['mae'],
            'output_1': ['mae'],
        }),
    dict(
        testcase_name='dict_of_list_of_built_in_fn',
        value={
            'output': [metrics.mae],
            'output_1': [metrics.mae],
        }),
    dict(
        testcase_name='dict_of_list_of_built_in_class',
        value={
            'output': [metrics.MeanAbsoluteError],
            'output_1': [metrics.MeanAbsoluteError],
        }),
    dict(
        testcase_name='dict_of_list_of_custom_fn',
        value={
            'output': [_my_mae],
            'output_1': [_my_mae],
        }),
    dict(
        testcase_name='dict_of_list_of_custom_class',
        value={
            'output': [MyMeanAbsoluteError],
            'output_1': [MyMeanAbsoluteError],
        }),
    dict(
        testcase_name='dict_of_string',
        value={
            'output': 'mae',
            'output_1': 'mae',
        }),
    dict(
        testcase_name='dict_of_built_in_fn',
        value={
            'output': metrics.mae,
            'output_1': metrics.mae,
        }),
    dict(
        testcase_name='dict_of_built_in_class',
        value={
            'output': metrics.MeanAbsoluteError,
            'output_1': metrics.MeanAbsoluteError,
        }),
    dict(
        testcase_name='dict_of_custom_fn',
        value={
            'output': _my_mae,
            'output_1': _my_mae
        }),
    dict(
        testcase_name='dict_of_custom_class',
        value={
            'output': MyMeanAbsoluteError,
            'output_1': MyMeanAbsoluteError,
        }),
)
class MetricsSerialization(keras_parameterized.TestCase):
  """Checks compiled metrics survive an HDF5 save/load round trip.

  The two public test methods previously duplicated ~35 lines of identical
  compile/fit/evaluate/save/compare logic; that logic now lives in
  `_run_round_trip`, and the tests differ only in how custom objects are
  supplied to `load_model` (scope vs. explicit `custom_objects`).
  """

  def setUp(self):
    super(MetricsSerialization, self).setUp()
    tmpdir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, tmpdir)
    self.model_filename = os.path.join(tmpdir, 'tmp_model_metric.h5')
    # Tiny fixed dataset so the expected per-epoch losses are exact.
    self.x = np.array([[0.], [1.], [2.]], dtype='float32')
    self.y = np.array([[0.5], [2.], [3.5]], dtype='float32')
    self.w = np.array([1.25, 0.5, 1.25], dtype='float32')

  @staticmethod
  def _get_instance(x):
    # Metric classes arrive as classes from the parameterization;
    # instantiate them so compile() receives Metric objects. Strings and
    # plain functions pass through unchanged.
    if isinstance(x, str):
      return x
    if isinstance(x, type) and issubclass(x, metrics.Metric):
      return x()
    return x

  def _run_round_trip(self, value, load_model_fn):
    """Compile/fit a model with `value` metrics, save it, reload it via
    `load_model_fn`, and assert evaluation results are unchanged.

    Args:
      value: metrics specification (string / fn / class, possibly nested
        in lists or dicts) as provided by the parameterization.
      load_model_fn: callable taking the model filename and returning the
        deserialized model.
    """
    metric_input = nest.map_structure(self._get_instance, value)
    weighted_metric_input = nest.map_structure(self._get_instance, value)
    model = _get_multi_io_model()
    model.compile(
        optimizer_v2.gradient_descent.SGD(0.1),
        'mae',
        metrics=metric_input,
        weighted_metrics=weighted_metric_input,
        run_eagerly=testing_utils.should_run_eagerly())
    history = model.fit([self.x, self.x], [self.y, self.y],
                        batch_size=3,
                        epochs=3,
                        sample_weight=[self.w, self.w])
    # Assert training.
    self.assertAllClose(history.history['loss'], [2., 1.6, 1.2], 1e-3)
    eval_results = model.evaluate([self.x, self.x], [self.y, self.y],
                                  sample_weight=[self.w, self.w])
    # HDF5 serialization requires h5py; skip the round trip when absent.
    if h5py is None:
      return
    model.save(self.model_filename)
    loaded_model = load_model_fn(self.model_filename)
    loaded_model.predict([self.x, self.x])
    loaded_eval_results = loaded_model.evaluate(
        [self.x, self.x], [self.y, self.y], sample_weight=[self.w, self.w])
    # Assert all evaluation results are the same.
    self.assertAllClose(eval_results, loaded_eval_results, 1e-9)

  def test_serializing_model_with_metric_with_custom_object_scope(self, value):
    # Build, train, save AND load inside the scope: load_model resolves the
    # custom names from the active custom_object_scope.
    with generic_utils.custom_object_scope({
        'MyMeanAbsoluteError': MyMeanAbsoluteError,
        '_my_mae': _my_mae,
        'Bias': testing_utils.Bias,
    }):
      self._run_round_trip(value, keras.models.load_model)

  def test_serializing_model_with_metric_with_custom_objects(self, value):
    # Same round trip, but custom names are passed explicitly to load_model.
    def load_fn(filename):
      return keras.models.load_model(
          filename,
          custom_objects={
              'MyMeanAbsoluteError': MyMeanAbsoluteError,
              '_my_mae': _my_mae,
              'Bias': testing_utils.Bias,
          })

    self._run_round_trip(value, load_fn)
if __name__ == '__main__':
test.main()
| |
# Copyright 2018 NTT Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.notifications.objects import base
from nova.notifications.objects import flavor as flavor_payload
from nova.notifications.objects import image as image_payload
from nova.notifications.objects import server_group as server_group_payload
from nova.objects import base as nova_base
from nova.objects import fields
@nova_base.NovaObjectRegistry.register_notification
class RequestSpecPayload(base.NotificationPayloadBase):
    """Notification payload wrapping a scheduler RequestSpec object."""
    # Version 1.0: Initial version
    # Version 1.1: Add force_hosts, force_nodes, ignore_hosts, image_meta,
    #              instance_group, requested_destination, retry,
    #              scheduler_hints and security_groups fields
    VERSION = '1.1'

    # Simple fields copied 1:1 from the request_spec by populate_schema();
    # everything else is converted by hand in __init__.
    SCHEMA = {
        'ignore_hosts': ('request_spec', 'ignore_hosts'),
        'instance_uuid': ('request_spec', 'instance_uuid'),
        'project_id': ('request_spec', 'project_id'),
        'user_id': ('request_spec', 'user_id'),
        'availability_zone': ('request_spec', 'availability_zone'),
        'num_instances': ('request_spec', 'num_instances'),
        'scheduler_hints': ('request_spec', 'scheduler_hints'),
    }

    fields = {
        'instance_uuid': fields.UUIDField(),
        'project_id': fields.StringField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'flavor': fields.ObjectField('FlavorPayload', nullable=True),
        'force_hosts': fields.StringField(nullable=True),
        'force_nodes': fields.StringField(nullable=True),
        'ignore_hosts': fields.ListOfStringsField(nullable=True),
        'image_meta': fields.ObjectField('ImageMetaPayload', nullable=True),
        'instance_group': fields.ObjectField('ServerGroupPayload',
                                             nullable=True),
        'image': fields.ObjectField('ImageMetaPayload', nullable=True),
        'numa_topology': fields.ObjectField('InstanceNUMATopologyPayload',
                                            nullable=True),
        'pci_requests': fields.ObjectField('InstancePCIRequestsPayload',
                                           nullable=True),
        'num_instances': fields.IntegerField(default=1),
        'requested_destination': fields.ObjectField('DestinationPayload',
                                                    nullable=True),
        'retry': fields.ObjectField('SchedulerRetriesPayload', nullable=True),
        'scheduler_hints': fields.DictOfListOfStringsField(nullable=True),
        'security_groups': fields.ListOfStringsField(),
    }

    def __init__(self, request_spec):
        """Build the payload from a RequestSpec object."""
        super(RequestSpecPayload, self).__init__()
        # flavor may be unset on the source object; guard with
        # obj_attr_is_set before touching it.
        self.flavor = flavor_payload.FlavorPayload(
            request_spec.flavor) if request_spec.obj_attr_is_set(
                'flavor') else None
        self.image = image_payload.ImageMetaPayload(
            request_spec.image) if request_spec.image else None
        if request_spec.numa_topology is not None:
            # Backfill instance_uuid on the nested topology so its own
            # payload's SCHEMA copy has the attribute available.
            if not request_spec.numa_topology.obj_attr_is_set('instance_uuid'):
                request_spec.numa_topology.instance_uuid = (
                    request_spec.instance_uuid)
            self.numa_topology = InstanceNUMATopologyPayload(
                request_spec.numa_topology)
        else:
            self.numa_topology = None
        if request_spec.pci_requests is not None:
            # Same backfill as for numa_topology above.
            if not request_spec.pci_requests.obj_attr_is_set('instance_uuid'):
                request_spec.pci_requests.instance_uuid = (
                    request_spec.instance_uuid)
            self.pci_requests = InstancePCIRequestsPayload(
                request_spec.pci_requests)
        else:
            self.pci_requests = None
        # 'attr in request_spec' guards attributes that may be entirely
        # unset (as opposed to set-to-None) on the source object.
        if 'requested_destination' in request_spec \
                and request_spec.requested_destination:
            self.requested_destination = DestinationPayload(
                destination=request_spec.requested_destination)
        else:
            self.requested_destination = None
        if 'retry' in request_spec and request_spec.retry:
            self.retry = SchedulerRetriesPayload(
                retry=request_spec.retry)
        else:
            self.retry = None
        self.security_groups = [
            sec_group.identifier for sec_group in request_spec.security_groups]
        if 'instance_group' in request_spec and request_spec.instance_group:
            self.instance_group = server_group_payload.ServerGroupPayload(
                group=request_spec.instance_group)
        else:
            self.instance_group = None
        # force_hosts/force_nodes are lists on the RequestSpec; only the
        # first entry is exposed in the payload.
        if 'force_hosts' in request_spec and request_spec.force_hosts:
            self.force_hosts = request_spec.force_hosts[0]
        else:
            self.force_hosts = None
        if 'force_nodes' in request_spec and request_spec.force_nodes:
            self.force_nodes = request_spec.force_nodes[0]
        else:
            self.force_nodes = None
        # Copy the simple SCHEMA fields last.
        self.populate_schema(request_spec=request_spec)
@nova_base.NovaObjectRegistry.register_notification
class InstanceNUMATopologyPayload(base.NotificationPayloadBase):
    """Notification payload for an instance NUMA topology."""
    # Version 1.0: Initial version
    VERSION = '1.0'

    # Fields copied 1:1 from the numa_topology object by populate_schema().
    SCHEMA = {
        'instance_uuid': ('numa_topology', 'instance_uuid'),
        'emulator_threads_policy': ('numa_topology',
                                    'emulator_threads_policy')
    }

    fields = {
        'instance_uuid': fields.UUIDField(),
        'cells': fields.ListOfObjectsField('InstanceNUMACellPayload'),
        'emulator_threads_policy': fields.CPUEmulatorThreadsPolicyField(
            nullable=True)
    }

    def __init__(self, numa_topology):
        """Build the payload from an InstanceNUMATopology object."""
        super(InstanceNUMATopologyPayload, self).__init__()
        # cells is not in SCHEMA; convert each cell to its own payload.
        self.cells = InstanceNUMACellPayload.from_numa_cell_list_obj(
            numa_topology.cells)
        self.populate_schema(numa_topology=numa_topology)
@nova_base.NovaObjectRegistry.register_notification
class InstanceNUMACellPayload(base.NotificationPayloadBase):
    """Notification payload for a single instance NUMA cell."""
    # Version 1.0: Initial version
    # Version 1.1: Added pcpuset field
    # Version 1.2: Added 'mixed' to cpu_policy field
    VERSION = '1.2'

    # Fields copied 1:1 from the numa_cell object by populate_schema().
    SCHEMA = {
        'id': ('numa_cell', 'id'),
        'cpuset': ('numa_cell', 'cpuset'),
        'pcpuset': ('numa_cell', 'pcpuset'),
        'cpuset_reserved': ('numa_cell', 'cpuset_reserved'),
        'memory': ('numa_cell', 'memory'),
        'pagesize': ('numa_cell', 'pagesize'),
        'cpu_pinning_raw': ('numa_cell', 'cpu_pinning_raw'),
        'cpu_policy': ('numa_cell', 'cpu_policy'),
        'cpu_thread_policy': ('numa_cell', 'cpu_thread_policy'),
    }

    fields = {
        'id': fields.IntegerField(),
        'cpuset': fields.SetOfIntegersField(),
        'pcpuset': fields.SetOfIntegersField(),
        'cpuset_reserved': fields.SetOfIntegersField(nullable=True),
        'memory': fields.IntegerField(),
        'pagesize': fields.IntegerField(nullable=True),
        'cpu_topology': fields.ObjectField('VirtCPUTopologyPayload',
                                           nullable=True),
        'cpu_pinning_raw': fields.DictOfIntegersField(nullable=True),
        'cpu_policy': fields.CPUAllocationPolicyField(nullable=True),
        'cpu_thread_policy': fields.CPUThreadAllocationPolicyField(
            nullable=True),
    }

    def __init__(self, numa_cell):
        """Build the payload from an InstanceNUMACell object."""
        super(InstanceNUMACellPayload, self).__init__()
        # cpu_topology is not in SCHEMA; convert it by hand when both set
        # and non-None.
        if (numa_cell.obj_attr_is_set('cpu_topology') and
                numa_cell.cpu_topology is not None):
            self.cpu_topology = VirtCPUTopologyPayload(numa_cell.cpu_topology)
        else:
            self.cpu_topology = None
        self.populate_schema(numa_cell=numa_cell)

    @classmethod
    def from_numa_cell_list_obj(cls, numa_cell_list):
        """Returns a list of InstanceNUMACellPayload objects
        based on the passed list of InstanceNUMACell objects.
        """
        payloads = []
        for numa_cell in numa_cell_list:
            payloads.append(cls(numa_cell))
        return payloads
@nova_base.NovaObjectRegistry.register_notification
class VirtCPUTopologyPayload(base.NotificationPayloadBase):
    """Notification payload for a virtual CPU topology."""
    # Version 1.0: Initial version
    VERSION = '1.0'

    # All fields are copied 1:1 from the source object by populate_schema().
    SCHEMA = {
        'sockets': ('virt_cpu_topology', 'sockets'),
        'cores': ('virt_cpu_topology', 'cores'),
        'threads': ('virt_cpu_topology', 'threads'),
    }

    fields = {
        'sockets': fields.IntegerField(nullable=True, default=1),
        'cores': fields.IntegerField(nullable=True, default=1),
        'threads': fields.IntegerField(nullable=True, default=1),
    }

    def __init__(self, virt_cpu_topology):
        """Build the payload from a VirtCPUTopology object."""
        super(VirtCPUTopologyPayload, self).__init__()
        self.populate_schema(virt_cpu_topology=virt_cpu_topology)
@nova_base.NovaObjectRegistry.register_notification
class InstancePCIRequestsPayload(base.NotificationPayloadBase):
    """Notification payload for an instance's collection of PCI requests."""
    # Version 1.0: Initial version
    VERSION = '1.0'

    SCHEMA = {
        'instance_uuid': ('pci_requests', 'instance_uuid')
    }

    fields = {
        'instance_uuid': fields.UUIDField(),
        'requests': fields.ListOfObjectsField('InstancePCIRequestPayload')
    }

    def __init__(self, pci_requests):
        """Build the payload from an InstancePCIRequests object."""
        super(InstancePCIRequestsPayload, self).__init__()
        # requests is not in SCHEMA; convert each request to its payload.
        self.requests = InstancePCIRequestPayload.from_pci_request_list_obj(
            pci_requests.requests)
        self.populate_schema(pci_requests=pci_requests)
@nova_base.NovaObjectRegistry.register_notification
class InstancePCIRequestPayload(base.NotificationPayloadBase):
    """Notification payload for a single PCI request."""
    # Version 1.0: Initial version
    VERSION = '1.0'

    # All fields are copied 1:1 from the source object by populate_schema().
    SCHEMA = {
        'count': ('pci_request', 'count'),
        'spec': ('pci_request', 'spec'),
        'alias_name': ('pci_request', 'alias_name'),
        'request_id': ('pci_request', 'request_id'),
        'numa_policy': ('pci_request', 'numa_policy')
    }

    fields = {
        'count': fields.IntegerField(),
        'spec': fields.ListOfDictOfNullableStringsField(),
        'alias_name': fields.StringField(nullable=True),
        'request_id': fields.UUIDField(nullable=True),
        'numa_policy': fields.PCINUMAAffinityPolicyField(nullable=True)
    }

    def __init__(self, pci_request):
        """Build the payload from an InstancePCIRequest object."""
        super(InstancePCIRequestPayload, self).__init__()
        self.populate_schema(pci_request=pci_request)

    @classmethod
    def from_pci_request_list_obj(cls, pci_request_list):
        """Returns a list of InstancePCIRequestPayload objects
        based on the passed list of InstancePCIRequest objects.
        """
        payloads = []
        for pci_request in pci_request_list:
            payloads.append(cls(pci_request))
        return payloads
@nova_base.NovaObjectRegistry.register_notification
class DestinationPayload(base.NotificationPayloadBase):
    """Notification payload for a requested scheduling destination."""
    # Version 1.0: Initial version
    VERSION = '1.0'

    SCHEMA = {
        'aggregates': ('destination', 'aggregates'),
    }

    fields = {
        'host': fields.StringField(),
        'node': fields.StringField(nullable=True),
        'cell': fields.ObjectField('CellMappingPayload', nullable=True),
        'aggregates': fields.ListOfStringsField(nullable=True,
                                                default=None),
    }

    def __init__(self, destination):
        """Build the payload from a Destination object."""
        super(DestinationPayload, self).__init__()
        # host/node/cell are only assigned when both set and non-None on
        # the source; otherwise the payload attribute is left untouched.
        if (destination.obj_attr_is_set('host') and
                destination.host is not None):
            self.host = destination.host
        if (destination.obj_attr_is_set('node') and
                destination.node is not None):
            self.node = destination.node
        if (destination.obj_attr_is_set('cell') and
                destination.cell is not None):
            self.cell = CellMappingPayload(destination.cell)
        self.populate_schema(destination=destination)
@nova_base.NovaObjectRegistry.register_notification
class SchedulerRetriesPayload(base.NotificationPayloadBase):
    """Notification payload describing scheduler retry state."""
    # Version 1.0: Initial version
    VERSION = '1.0'

    SCHEMA = {
        'num_attempts': ('retry', 'num_attempts'),
    }

    fields = {
        'num_attempts': fields.IntegerField(),
        'hosts': fields.ListOfStringsField(),
    }

    def __init__(self, retry):
        """Build the payload from a SchedulerRetries object."""
        super(SchedulerRetriesPayload, self).__init__()
        # Flatten retry.hosts (compute node objects) to hostname strings.
        self.hosts = []
        for compute_node in retry.hosts:
            self.hosts.append(compute_node.hypervisor_hostname)
        self.populate_schema(retry=retry)
@nova_base.NovaObjectRegistry.register_notification
class CellMappingPayload(base.NotificationPayloadBase):
    """Notification payload for a cell mapping."""
    # Version 1.0: Initial version
    # Version 2.0: Remove transport_url and database_connection fields.
    VERSION = '2.0'

    # All fields are copied 1:1 from the cell object by populate_schema().
    SCHEMA = {
        'uuid': ('cell', 'uuid'),
        'name': ('cell', 'name'),
        'disabled': ('cell', 'disabled'),
    }

    fields = {
        'uuid': fields.UUIDField(),
        'name': fields.StringField(nullable=True),
        'disabled': fields.BooleanField(default=False),
    }

    def __init__(self, cell):
        """Build the payload from a CellMapping object."""
        super(CellMappingPayload, self).__init__()
        self.populate_schema(cell=cell)
| |
#!/usr/bin/env python
from scitools.FunctionSelector import *
#from math import sqrt, pi
from scitools.numpyutils import *
import sys
sys.path.insert(0, os.path.join(os.environ['scripting'], 'src',
'py', 'examples', 'pde'))
from wave1D_class import WaveEq2, SolverWithViz
from Tkinter import *
import Pmw, tkMessageBox
class WaveSolverWithViz(SolverWithViz):
    """Solver wrapper that plots the bottom profile and the surface."""

    def do_graphics(self):
        """Plot bottom and surface."""
        # bottom: -c(x,t), surface: self.up
        # NOTE(review): assumes self.s exposes x, t, up and
        # physical_prm['c'] as in SolverWithViz — confirm against base
        # class.
        if self.g is not None:
            H = self.s.physical_prm['c'](self.s.x, self.s.t)
            self.__H = -H
            #print 't=%g, max u=%g' % (self.s.t,arrmax(self.s.up))
            self.g.plotcurves([(self.__H, 'H'), (self.s.up, 'u')])
            self.g.configure(title='time %12.5E' % self.s.t)
class WaveSimGUI:
    """Tkinter GUI driving the 1D wave equation solver (Python 2 code)."""

    def __init__(self, parent):
        """Build GUI, allocate solver, etc."""
        self.master = parent
        self.row_frame = Frame(self.master, borderwidth=2, relief='groove')
        self.row_frame.pack()
        # One button per user action; each maps to a method below.
        Button(self.row_frame, text='Physics', command=self.set_physics,
               width=12).pack(side='left')
        Button(self.row_frame, text='Numerics', command=self.set_numerics,
               width=12).pack(side='left')
        Button(self.row_frame, text='Simulate', command=self.simulate,
               width=12).pack(side='left')
        Button(self.row_frame, text='Stop', command=self.stop,
               width=12).pack(side='left')
        Button(self.row_frame, text='Continue', command=self.continue_,
               width=12).pack(side='left')
        Button(self.row_frame, text='Quit', command=self.master.destroy,
               width=12).pack(side='left')
        self.w = WaveSolverWithViz(WaveEq2(), plot=True,
                                   program='BLT', parent_frame=self.master,
                                   sleep=0)
        self._setup_shapes()
        self.nGUI = Parameters(interface='GUI')  # numerics part of GUI
        self.nGUI.add('stop time for simulation', 60.0, widget_type='entry')
        self.nGUI.add('safety factor for time step', 1.0, widget_type='entry')
        self.nGUI.add('no of grid cells', 100,
                      widget_type='slider', values=(0, 1000))
        self.nGUI.add('movie speed', 1.0,
                      widget_type='slider', values=(0, 1))
        self.scheme_coding = 'vectorized'

    def set_physics(self):
        """Launch dialog (Physics button in main window)."""
        self.physics_dialog = Pmw.Dialog(self.master,
            title='Set initial condition and bottom shape',
            buttons=('Apply', 'Cancel', 'Dismiss'),
            defaultbutton='Apply',
            command=self.physics_dialog_action)
        self.pGUI = {}  # physics parts of GUI
        # Notebook with one page per shape family (built in _setup_shapes).
        self.pGUI['notebook'] = \
            FunctionSelector(self.physics_dialog.interior())
        self.pGUI['notebook'].add('Initial surface', self.I_list)
        self.pGUI['notebook'].add('Bottom shape', self.H_list)
        self.pGUI['notebook'].pack()
        self.pGUI['notebook'].page['Bottom shape'].\
            page['Drawing'].drawing.set_yaxis(-1.1, 0.1)
        self.pGUI['notebook'].page['Bottom shape'].\
            page['Drawing'].drawing.configure(width=500)
        self.pGUI['notebook'].page['Initial surface'].\
            page['Drawing'].drawing.configure(width=500)
        try:
            # load previously selected pages (if selected...):
            self.pGUI['notebook'].select('Initial surface',
                                         self.pGUI['I page'])
            self.pGUI['notebook'].select('Bottom shape',
                                         self.pGUI['H page'])
        except Exception, msg:
            # first time... no pages are user selected
            self.pGUI['notebook'].select('Initial surface', 'Gaussian bell')
            self.pGUI['notebook'].select('Bottom shape', 'Gaussian bell')

    def physics_dialog_action(self, result):
        """Handle a button click in the physics dialog."""
        # result contains the name of the button that we clicked
        if result == 'Apply':
            self.pGUI['I func'], self.pGUI['I page'] = \
                self.pGUI['notebook'].get('Initial surface')
            self.pGUI['H func'], self.pGUI['H page'] = \
                self.pGUI['notebook'].get('Bottom shape')
        elif result == 'Dismiss':
            self.physics_dialog.destroy()  # destroy dialog window

    def set_numerics(self):
        """Launch dialog (Numerics button in main window)."""
        self.numerics_dialog = Pmw.Dialog(self.master,
            title='Set numerical parameters',
            buttons=('Apply', 'Cancel', 'Dismiss'),
            defaultbutton='Apply',
            command=self.numerics_dialog_action)
        from scitools.ParameterInterface import parametersGUI
        parametersGUI(self.nGUI, self.numerics_dialog.interior())

    def numerics_dialog_action(self, result):
        """Handle a button click in the numerics dialog."""
        if result == 'Dismiss':
            self.numerics_dialog.destroy()
        # no need to load data when Apply is pressed since
        # the data are bound to self.nGUI through Tkinter variables

    def stop(self):
        """Signal the running solver loop to terminate."""
        self.w.s.finished = True

    def simulate(self):
        """Start a fresh simulation with the current GUI parameters."""
        try:
            self.w.s.set(I=self.pGUI['I func'], c=self.pGUI['H func'])
        except (AttributeError, KeyError):
            # pGUI (or its func entries) does not exist until the user has
            # pressed Physics and then Apply.
            message = 'You must set physics parameters\n'\
                      '(first press Physics button and then Apply)'
            tkMessageBox.Message(icon='info', type='ok',
                                 message=message, title='About').show()
            return
        self.w.s.set(L=10, n=self.nGUI['no of grid cells'],
                     tstop=self.nGUI['stop time for simulation'],
                     safety_factor=self.nGUI['safety factor for time step'],
                     user_action=self.w.action, dt=0,
                     scheme_coding=self.scheme_coding)  # 'scalar'/'vectorized'
        self.w.g.configure(sleep=1.0 - self.nGUI['movie speed'])
        # ooops: user may have chosen parameters that are incompatible
        # with [-1,1] as range (can read from drawing, but not the others)
        self.w.set_graphics(ymax=1.0, ymin=-1.0, xcoor=self.w.s.x)
        self.w.s.dump()
        self.w.s.set_ic()
        self.w.s.solve_problem()

    def continue_(self):
        """Resume a stopped simulation from its previous state."""
        # must have run simulate first, i.e., self.w.s.t must exist
        if not hasattr(self.w.s, 't'):
            message = 'You must first press Simulate, then Stop '\
                      'and then Continue'
            tkMessageBox.Message(icon='info', type='ok',
                                 message=message, title='About').show()
            return
        # the user may have changed parameters, but changing n is illegal
        if self.w.s.numerical_prm['n'] != self.nGUI['no of grid cells']:
            message = 'You have changed the grid. This has no effect '\
                      'in a continued simulation. Start new simulation.'
            tkMessageBox.Message(icon='info', type='ok',
                                 message=message, title='About').show()
            return
        self.w.s.set(tstop=self.nGUI['stop time for simulation'],
                     safety_factor=self.nGUI['safety factor for time step'])
        self.w.g.configure(sleep=1.0 - self.nGUI['movie speed'])
        self.w.s.dump()
        # no self.w.s.set_ic(); we start from previous state
        self.w.s.solve_problem()

    def _setup_shapes(self):
        """Create the menus of initial-surface (I) and bottom (c/H) shapes."""
        # I and c functions (H=-c):
        class GaussianBell:
            """Gaussian Bell at x0 with st.dev. sigma."""
            def __init__(self, x0, sigma, amplitude):
                self.x0 = x0; self.sigma = sigma
                self.amplitude = amplitude
            def __call__(self, x):
                return self.amplitude*exp(-0.5*((x-self.x0)/self.sigma)**2)
            def __str__(self):
                return 'amplitude*exp(-0.5*((x-x0)/sigma)**2)'
            def parameters(self):
                # Default parameter values shown in the GUI.
                return {'x0': self.x0, 'sigma': self.sigma,
                        'amplitude': 1.0}

        gb = GaussianBell(0, 0.5, 1.0)
        l = [FuncSpec(UserFunction, name='Gaussian bell',
                      independent_variables=['x'],
                      function_object=gb,
                      parameters=gb.parameters(),
                      formula=str(gb)),
             FuncSpec(Drawing, name='Drawing',
                      independent_variables=['x'],
                      xcoor=seq(0, 10, 0.02)),
             FuncSpec(UserFunction, name='Flat',
                      function_object=wrap2callable(0.0),
                      independent_variables=['x'],
                      formula='I(x)=0')]
        self.I_list = l

        class Slide1:
            """Underwater slide."""
            def __init__(self, **kwargs):
                self.d = {'Delta': -0.05, 'beta': 0.03, 'eps': 0.5,
                          'L': 11.0,
                          'K': 0.7, 'c': 1.2, 'alpha': -0.3, 'gamma': 0.7}
                for prm in kwargs:
                    try:
                        self.d[prm] = kwargs[prm]
                    except NameError:
                        # NOTE(review): dict item assignment cannot raise
                        # NameError, so unknown keywords are silently
                        # accepted — confirm whether a key check was meant.
                        raise NameError, \
                            'illegal constructor keyword argument "%s"' % prm
                self.__dict__.update(self.d)
            def __call__(self, x, t):
                stationary = self.Delta - self.beta*(x + self.eps)*\
                             (x + self.eps - self.L);
                slide = self.K*1.0/(sqrt(2*pi*self.gamma))*exp(\
                    -(1.0/self.gamma)*(x - \
                    (self.L + self.eps + 2)/5.0 + \
                    self.c*exp(self.alpha*t))**2)
                return sqrt(stationary - slide)  # bottom is c**2, this is c
            def parameters(self):
                return self.d
            def __str__(self):
                s = 'Delta - beta*(x+eps)*(x+eps-L) -'\
                    'K*1.0/(sqrt(2*pi*gamma))*exp('\
                    '-(1.0/gamma)*(x - (L+eps+2)/5.0 + c*exp(alpha*t))**2)'
                return s

        slide = Slide1()

        class Hdraw:
            """Bottom defined by a user drawing; c(x) = sqrt(-h(x))."""
            def __init__(self):
                pass
            def attach_func(self, drawing):
                self.h = drawing
            def __call__(self, x, t=0):
                return sqrt(-self.h(x))

        class BottomBell:
            """1 - Gaussian Bell at x0 with st.dev. sigma."""
            def __init__(self, x0, sigma, amplitude):
                self.x0 = x0; self.sigma = sigma
                self.amplitude = amplitude
            def __call__(self, x, t=0):
                return sqrt(1 - self.amplitude*exp(
                    -0.5*((x-self.x0)/self.sigma)**2))
            def __str__(self):
                return '1 - amplitude*exp(-0.5*((x-x0)/sigma)**2)'
            def parameters(self):
                return {'x0': self.x0, 'sigma': self.sigma,
                        'amplitude': 0.5}

        hbell = BottomBell(3, 0.5, 0.65)
        # Parameters object?
        h = [FuncSpec(UserFunction, name='Unit velocity',
                      independent_variables=['x'],
                      function_object=wrap2callable(1.0),
                      #function_object=lambda x: 1, gives non-vectorized func
                      formula='H(x,t)=1'),
             FuncSpec(Drawing, name='Drawing',
                      independent_variables=['x'],
                      formula='draw depth function',
                      function_object=Hdraw(),
                      xcoor=seq(0, 10, 0.02)),
             FuncSpec(UserFunction, name='Slide',
                      independent_variables=['x', 't'],
                      function_object=slide,
                      parameters=slide.parameters(),
                      formula=str(slide),
                      #image='Slide1_formula1.gif',
                      scrolled_frame={'width': 400, 'height': 300}),
             FuncSpec(UserFunction, name='Gaussian bell',
                      independent_variables=['x'],
                      function_object=hbell,
                      parameters=hbell.parameters(),
                      formula=str(hbell),),
             ]
        self.H_list = h
# Script entry point: build the root window and start the GUI event loop.
if __name__ == '__main__':
    root = Tk()
    Pmw.initialise(root)
    import scitools.misc; scitools.misc.fontscheme3(root)
    w = WaveSimGUI(root)
    root.mainloop()
| |
from __future__ import absolute_import
import datetime
import logging
import os
import sys
import socket
from socket import error as SocketError, timeout as SocketTimeout
import warnings
from .packages import six
from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection
from .packages.six.moves.http_client import HTTPException # noqa: F401
try: # Compiled with SSL?
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
ssl = None
class BaseSSLError(BaseException):
pass
try: # Python 3:
# Not a no-op, we're adding this to the namespace so it can be imported.
ConnectionError = ConnectionError
except NameError: # Python 2:
class ConnectionError(Exception):
pass
from .exceptions import (
NewConnectionError,
ConnectTimeoutError,
SubjectAltNameWarning,
SystemTimeWarning,
)
from .packages.ssl_match_hostname import match_hostname, CertificateError
from .util.ssl_ import (
resolve_cert_reqs,
resolve_ssl_version,
assert_fingerprint,
create_urllib3_context,
ssl_wrap_socket
)
from .util import connection
from ._collections import HTTPHeaderDict
# Module-level logger, named after this module.
log = logging.getLogger(__name__)

# Default TCP port per URL scheme.
port_by_scheme = {
    'http': 80,
    'https': 443,
}

# When updating RECENT_DATE, move it to
# within two years of the current date, and no
# earlier than 6 months ago.
RECENT_DATE = datetime.date(2016, 1, 1)
class DummyConnection(object):
    """Used to detect a failed ConnectionCls import."""
class HTTPConnection(_HTTPConnection, object):
"""
Based on httplib.HTTPConnection but provides an extra constructor
backwards-compatibility layer between older and newer Pythons.
Additional keyword parameters are used to configure attributes of the connection.
Accepted parameters include:
- ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
- ``source_address``: Set the source address for the current connection.
.. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x
- ``socket_options``: Set specific options on the underlying socket. If not specified, then
defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
For example, if you wish to enable TCP Keep Alive in addition to the defaults,
you might pass::
HTTPConnection.default_socket_options + [
(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
]
Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
"""
default_port = port_by_scheme['http']
#: Disable Nagle's algorithm by default.
#: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
#: Whether this connection verifies the host's certificate.
is_verified = False
def __init__(self, *args, **kw):
if six.PY3: # Python 3
kw.pop('strict', None)
# Pre-set source_address in case we have an older Python like 2.6.
self.source_address = kw.get('source_address')
if sys.version_info < (2, 7): # Python 2.6
# _HTTPConnection on Python 2.6 will balk at this keyword arg, but
# not newer versions. We can still use it when creating a
# connection though, so we pop it *after* we have saved it as
# self.source_address.
kw.pop('source_address', None)
#: The socket options provided by the user. If no options are
#: provided, we use the default options.
self.socket_options = kw.pop('socket_options', self.default_socket_options)
# Superclass also sets self.source_address in Python 2.7+.
_HTTPConnection.__init__(self, *args, **kw)
def _new_conn(self):
""" Establish a socket connection and set nodelay settings on it.
:return: New socket connection.
"""
extra_kw = {}
if self.source_address:
extra_kw['source_address'] = self.source_address
if self.socket_options:
extra_kw['socket_options'] = self.socket_options
try:
conn = connection.create_connection(
(self.host, self.port), self.timeout, **extra_kw)
except SocketTimeout as e:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout))
except SocketError as e:
raise NewConnectionError(
self, "Failed to establish a new connection: %s" % e)
return conn
def _prepare_conn(self, conn):
self.sock = conn
# the _tunnel_host attribute was added in python 2.6.3 (via
# http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
# not have them.
if getattr(self, '_tunnel_host', None):
# TODO: Fix tunnel so it doesn't depend on self.sock state.
self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
def connect(self):
    """Open a fresh socket and install it as this connection's transport."""
    self._prepare_conn(self._new_conn())
def request_chunked(self, method, url, body=None, headers=None):
    """
    Alternative to the common request method, which sends the
    body with chunked encoding and not as one block
    """
    headers = HTTPHeaderDict(headers if headers is not None else {})
    # If the caller already supplied these headers, tell httplib not to
    # add its own copies.
    skip_accept_encoding = 'accept-encoding' in headers
    skip_host = 'host' in headers
    self.putrequest(
        method,
        url,
        skip_accept_encoding=skip_accept_encoding,
        skip_host=skip_host
    )
    for header, value in headers.items():
        self.putheader(header, value)
    if 'transfer-encoding' not in headers:
        self.putheader('Transfer-Encoding', 'chunked')
    self.endheaders()

    if body is not None:
        stringish_types = six.string_types + (six.binary_type,)
        if isinstance(body, stringish_types):
            # A single str/bytes body is sent as one chunk.
            body = (body,)
        for chunk in body:
            if not chunk:
                # A zero-length chunk would be read as the stream
                # terminator, so skip empty chunks entirely.
                continue
            if not isinstance(chunk, six.binary_type):
                chunk = chunk.encode('utf8')
            # Chunk framing per RFC 7230: hex length, CRLF, payload, CRLF.
            len_str = hex(len(chunk))[2:]
            self.send(len_str.encode('utf-8'))
            self.send(b'\r\n')
            self.send(chunk)
            self.send(b'\r\n')

    # After the if clause, to always have a closed body
    self.send(b'0\r\n\r\n')
class HTTPSConnection(HTTPConnection):
    """HTTP connection whose socket is wrapped with TLS.

    Note: this base class does not verify the peer certificate
    (``is_verified`` stays False); see ``VerifiedHTTPSConnection``.
    """
    default_port = port_by_scheme['https']
    ssl_version = None

    def __init__(self, host, port=None, key_file=None, cert_file=None,
                 strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                 ssl_context=None, **kw):

        HTTPConnection.__init__(self, host, port, strict=strict,
                                timeout=timeout, **kw)

        self.key_file = key_file
        self.cert_file = cert_file
        self.ssl_context = ssl_context

        # Required property for Google AppEngine 1.9.0 which otherwise causes
        # HTTPS requests to go out as HTTP. (See Issue #356)
        self._protocol = 'https'

    def connect(self):
        conn = self._new_conn()
        self._prepare_conn(conn)

        if self.ssl_context is None:
            # NOTE(review): passing None presumably selects the library's
            # default ssl_version/cert_reqs — confirm against ssl_ helpers.
            self.ssl_context = create_urllib3_context(
                ssl_version=resolve_ssl_version(None),
                cert_reqs=resolve_cert_reqs(None),
            )

        self.sock = ssl_wrap_socket(
            sock=conn,
            keyfile=self.key_file,
            certfile=self.cert_file,
            ssl_context=self.ssl_context,
        )
class VerifiedHTTPSConnection(HTTPSConnection):
    """
    Based on httplib.HTTPSConnection but wraps the socket with
    SSL certification.
    """
    cert_reqs = None
    ca_certs = None
    ca_cert_dir = None
    ssl_version = None
    assert_fingerprint = None

    def set_cert(self, key_file=None, cert_file=None,
                 cert_reqs=None, ca_certs=None,
                 assert_hostname=None, assert_fingerprint=None,
                 ca_cert_dir=None):
        """
        This method should only be called once, before the connection is used.
        """
        # If cert_reqs is not provided, we can try to guess. If the user gave
        # us a cert database, we assume they want to use it: otherwise, if
        # they gave us an SSL Context object we should use whatever is set for
        # it.
        if cert_reqs is None:
            if ca_certs or ca_cert_dir:
                cert_reqs = 'CERT_REQUIRED'
            elif self.ssl_context is not None:
                cert_reqs = self.ssl_context.verify_mode

        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint
        # Expand '~' so users can pass home-relative certificate paths.
        self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
        self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)

    def connect(self):
        # Add certificate verification
        conn = self._new_conn()

        hostname = self.host
        if getattr(self, '_tunnel_host', None):
            # _tunnel_host was added in Python 2.6.3
            # (See: http://hg.python.org/cpython/rev/0f57b30a152f)
            self.sock = conn
            # Calls self._set_hostport(), so self.host is
            # self._tunnel_host below.
            self._tunnel()
            # Mark this connection as not reusable
            self.auto_open = 0

            # Override the host with the one we're requesting data from.
            hostname = self._tunnel_host

        # A clock set before RECENT_DATE almost certainly means the system
        # time is wrong, which would make certificate validity checks fail.
        is_time_off = datetime.date.today() < RECENT_DATE
        if is_time_off:
            warnings.warn((
                'System time is way off (before {0}). This will probably '
                'lead to SSL verification errors').format(RECENT_DATE),
                SystemTimeWarning
            )

        # Wrap socket using verification with the root certs in
        # trusted_root_certs
        if self.ssl_context is None:
            self.ssl_context = create_urllib3_context(
                ssl_version=resolve_ssl_version(self.ssl_version),
                cert_reqs=resolve_cert_reqs(self.cert_reqs),
            )

        context = self.ssl_context
        context.verify_mode = resolve_cert_reqs(self.cert_reqs)
        self.sock = ssl_wrap_socket(
            sock=conn,
            keyfile=self.key_file,
            certfile=self.cert_file,
            ca_certs=self.ca_certs,
            ca_cert_dir=self.ca_cert_dir,
            server_hostname=hostname,
            ssl_context=context)

        if self.assert_fingerprint:
            # Pin the exact leaf certificate instead of trusting the chain.
            assert_fingerprint(self.sock.getpeercert(binary_form=True),
                               self.assert_fingerprint)
        elif context.verify_mode != ssl.CERT_NONE \
                and not getattr(context, 'check_hostname', False) \
                and self.assert_hostname is not False:
            # While urllib3 attempts to always turn off hostname matching from
            # the TLS library, this cannot always be done. So we check whether
            # the TLS Library still thinks it's matching hostnames.
            cert = self.sock.getpeercert()
            if not cert.get('subjectAltName', ()):
                warnings.warn((
                    'Certificate for {0} has no `subjectAltName`, falling back to check for a '
                    '`commonName` for now. This feature is being removed by major browsers and '
                    'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 '
                    'for details.)'.format(hostname)),
                    SubjectAltNameWarning
                )
            _match_hostname(cert, self.assert_hostname or hostname)

        # Verified iff certificates were actually required, or we pinned
        # a specific fingerprint above.
        self.is_verified = (
            context.verify_mode == ssl.CERT_REQUIRED or
            self.assert_fingerprint is not None
        )
def _match_hostname(cert, asserted_hostname):
    """Check *cert* against *asserted_hostname*, attaching the peer
    certificate to the raised error so callers can inspect it."""
    try:
        match_hostname(cert, asserted_hostname)
    except CertificateError as cert_error:
        log.error(
            'Certificate did not match expected hostname: %s. '
            'Certificate: %s', asserted_hostname, cert
        )
        # Add cert to exception and reraise so client code can inspect
        # the cert when catching the exception, if they want to
        cert_error._peer_cert = cert
        raise
if ssl:
    # Make a copy for testing.
    UnverifiedHTTPSConnection = HTTPSConnection
    # Export the verifying implementation under the public name.
    HTTPSConnection = VerifiedHTTPSConnection
else:
    # No ssl module available; presumably DummyConnection errors on use —
    # NOTE(review): confirm DummyConnection's behavior elsewhere in the file.
    HTTPSConnection = DummyConnection
| |
import datetime
import uuid
from django.conf import settings
from django.core.exceptions import FieldError
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.models import aggregates, fields
from django.utils import timezone
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.duration import duration_string
class DatabaseOperations(BaseDatabaseOperations):
    """SQLite-specific SQL generation and value adaptation/conversion.

    SQLite lacks several SQL functions (EXTRACT, DATE_TRUNC, POWER, ...),
    so many methods below emit calls to ``django_*`` user-defined functions
    that this backend registers on every connection in connect().
    """

    def bulk_batch_size(self, fields, objs):
        """
        SQLite has a compile-time default (SQLITE_LIMIT_VARIABLE_NUMBER) of
        999 variables per query.

        If there's only a single field to insert, the limit is 500
        (SQLITE_MAX_COMPOUND_SELECT).
        """
        limit = 999 if len(fields) > 1 else 500
        # With no fields there is nothing to parameterize, so all objects
        # fit into a single batch.
        return (limit // len(fields)) if len(fields) > 0 else len(objs)

    def check_expression_support(self, expression):
        """Reject aggregates over date/time fields (stored as text in SQLite)."""
        bad_fields = (fields.DateField, fields.DateTimeField, fields.TimeField)
        bad_aggregates = (aggregates.Sum, aggregates.Avg, aggregates.Variance, aggregates.StdDev)
        if isinstance(expression, bad_aggregates):
            for expr in expression.get_source_expressions():
                try:
                    output_field = expr.output_field
                    if isinstance(output_field, bad_fields):
                        raise NotImplementedError(
                            'You cannot use Sum, Avg, StdDev, and Variance '
                            'aggregations on date/time fields in sqlite3 '
                            'since date/time is saved as text.'
                        )
                except FieldError:
                    # Not every subexpression has an output_field which is fine
                    # to ignore.
                    pass

    def date_extract_sql(self, lookup_type, field_name):
        # sqlite doesn't support extract, so we fake it with the user-defined
        # function django_date_extract that's registered in connect(). Note that
        # single quotes are used because this is a string (and could otherwise
        # cause a collision with a field name).
        return "django_date_extract('%s', %s)" % (lookup_type.lower(), field_name)

    def date_interval_sql(self, timedelta):
        # Intervals are passed to the custom functions as duration strings.
        return "'%s'" % duration_string(timedelta), []

    def format_for_duration_arithmetic(self, sql):
        """Do nothing here, we will handle it in the custom function."""
        return sql

    def date_trunc_sql(self, lookup_type, field_name):
        # sqlite doesn't support DATE_TRUNC, so we fake it with a user-defined
        # function django_date_trunc that's registered in connect(). Note that
        # single quotes are used because this is a string (and could otherwise
        # cause a collision with a field name).
        return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name)

    def time_trunc_sql(self, lookup_type, field_name):
        # sqlite doesn't support DATE_TRUNC, so we fake it with a user-defined
        # function django_date_trunc that's registered in connect(). Note that
        # single quotes are used because this is a string (and could otherwise
        # cause a collision with a field name).
        return "django_time_trunc('%s', %s)" % (lookup_type.lower(), field_name)

    def datetime_cast_date_sql(self, field_name, tzname):
        # The timezone name is passed as a query parameter.
        return "django_datetime_cast_date(%s, %%s)" % field_name, [tzname]

    def datetime_cast_time_sql(self, field_name, tzname):
        return "django_datetime_cast_time(%s, %%s)" % field_name, [tzname]

    def datetime_extract_sql(self, lookup_type, field_name, tzname):
        # Same comment as in date_extract_sql.
        return "django_datetime_extract('%s', %s, %%s)" % (
            lookup_type.lower(), field_name), [tzname]

    def datetime_trunc_sql(self, lookup_type, field_name, tzname):
        # Same comment as in date_trunc_sql.
        return "django_datetime_trunc('%s', %s, %%s)" % (
            lookup_type.lower(), field_name), [tzname]

    def time_extract_sql(self, lookup_type, field_name):
        # sqlite doesn't support extract, so we fake it with the user-defined
        # function django_time_extract that's registered in connect(). Note that
        # single quotes are used because this is a string (and could otherwise
        # cause a collision with a field name).
        return "django_time_extract('%s', %s)" % (lookup_type.lower(), field_name)

    def pk_default_value(self):
        # Inserting NULL into an INTEGER PRIMARY KEY lets SQLite autoassign.
        return "NULL"

    def _quote_params_for_last_executed_query(self, params):
        """
        Only for last_executed_query! Don't use this to execute SQL queries!
        """
        # This function is limited both by SQLITE_LIMIT_VARIABLE_NUMBER (the
        # number of parameters, default = 999) and SQLITE_MAX_COLUMN (the
        # number of return values, default = 2000). Since Python's sqlite3
        # module doesn't expose the get_limit() C API, assume the default
        # limits are in effect and split the work in batches if needed.
        BATCH_SIZE = 999
        if len(params) > BATCH_SIZE:
            # Recurse on fixed-size chunks and concatenate the results.
            results = ()
            for index in range(0, len(params), BATCH_SIZE):
                chunk = params[index:index + BATCH_SIZE]
                results += self._quote_params_for_last_executed_query(chunk)
            return results

        sql = 'SELECT ' + ', '.join(['QUOTE(?)'] * len(params))
        # Bypass Django's wrappers and use the underlying sqlite3 connection
        # to avoid logging this query - it would trigger infinite recursion.
        cursor = self.connection.connection.cursor()
        # Native sqlite3 cursors cannot be used as context managers.
        try:
            return cursor.execute(sql, params).fetchone()
        finally:
            cursor.close()

    def last_executed_query(self, cursor, sql, params):
        # Python substitutes parameters in Modules/_sqlite/cursor.c with:
        # pysqlite_statement_bind_parameters(self->statement, parameters, allow_8bit_chars);
        # Unfortunately there is no way to reach self->statement from Python,
        # so we quote and substitute parameters manually.
        if params:
            if isinstance(params, (list, tuple)):
                params = self._quote_params_for_last_executed_query(params)
            else:
                # Mapping-style params: quote the values, keep the keys.
                keys = params.keys()
                values = tuple(params.values())
                values = self._quote_params_for_last_executed_query(values)
                params = dict(zip(keys, values))
            return sql % params
        # For consistency with SQLiteCursorWrapper.execute(), just return sql
        # when there are no parameters. See #13648 and #17158.
        else:
            return sql

    def quote_name(self, name):
        if name.startswith('"') and name.endswith('"'):
            return name  # Quoting once is enough.
        return '"%s"' % name

    def no_limit_value(self):
        # SQLite uses LIMIT -1 to mean "no limit".
        return -1

    def sql_flush(self, style, tables, sequences, allow_cascade=False):
        # NB: The generated SQL below is specific to SQLite
        # Note: The DELETE FROM... SQL generated below works for SQLite databases
        # because constraints don't exist
        sql = ['%s %s %s;' % (
            style.SQL_KEYWORD('DELETE'),
            style.SQL_KEYWORD('FROM'),
            style.SQL_FIELD(self.quote_name(table))
        ) for table in tables]
        # Note: No requirement for reset of auto-incremented indices (cf. other
        # sql_flush() implementations). Just return SQL at this point
        return sql

    def adapt_datetimefield_value(self, value):
        """Convert a datetime to the string form SQLite stores."""
        if value is None:
            return None

        # Expression values are adapted by the database.
        if hasattr(value, 'resolve_expression'):
            return value

        # SQLite doesn't support tz-aware datetimes
        if timezone.is_aware(value):
            if settings.USE_TZ:
                value = timezone.make_naive(value, self.connection.timezone)
            else:
                raise ValueError("SQLite backend does not support timezone-aware datetimes when USE_TZ is False.")

        return str(value)

    def adapt_timefield_value(self, value):
        """Convert a time to the string form SQLite stores."""
        if value is None:
            return None

        # Expression values are adapted by the database.
        if hasattr(value, 'resolve_expression'):
            return value

        # SQLite doesn't support tz-aware datetimes
        if timezone.is_aware(value):
            raise ValueError("SQLite backend does not support timezone-aware times.")

        return str(value)

    def get_db_converters(self, expression):
        """Append SQLite-specific row-value converters per field type."""
        converters = super(DatabaseOperations, self).get_db_converters(expression)
        internal_type = expression.output_field.get_internal_type()
        if internal_type == 'DateTimeField':
            converters.append(self.convert_datetimefield_value)
        elif internal_type == 'DateField':
            converters.append(self.convert_datefield_value)
        elif internal_type == 'TimeField':
            converters.append(self.convert_timefield_value)
        elif internal_type == 'DecimalField':
            converters.append(self.convert_decimalfield_value)
        elif internal_type == 'UUIDField':
            converters.append(self.convert_uuidfield_value)
        elif internal_type in ('NullBooleanField', 'BooleanField'):
            converters.append(self.convert_booleanfield_value)
        return converters

    def convert_datetimefield_value(self, value, expression, connection, context):
        if value is not None:
            if not isinstance(value, datetime.datetime):
                value = parse_datetime(value)
            if settings.USE_TZ and not timezone.is_aware(value):
                value = timezone.make_aware(value, self.connection.timezone)
        return value

    def convert_datefield_value(self, value, expression, connection, context):
        if value is not None:
            if not isinstance(value, datetime.date):
                value = parse_date(value)
        return value

    def convert_timefield_value(self, value, expression, connection, context):
        if value is not None:
            if not isinstance(value, datetime.time):
                value = parse_time(value)
        return value

    def convert_decimalfield_value(self, value, expression, connection, context):
        if value is not None:
            # Re-apply the field's precision before casting to Decimal.
            value = expression.output_field.format_number(value)
            value = backend_utils.typecast_decimal(value)
        return value

    def convert_uuidfield_value(self, value, expression, connection, context):
        if value is not None:
            value = uuid.UUID(value)
        return value

    def convert_booleanfield_value(self, value, expression, connection, context):
        # Only map stored 0/1 to bool; pass anything else (e.g. None) through.
        return bool(value) if value in (1, 0) else value

    def bulk_insert_sql(self, fields, placeholder_rows):
        # SQLite (pre-3.7.11 syntax) multi-row insert via UNION ALL SELECTs.
        return " UNION ALL ".join(
            "SELECT %s" % ", ".join(row)
            for row in placeholder_rows
        )

    def combine_expression(self, connector, sub_expressions):
        # SQLite doesn't have a power function, so we fake it with a
        # user-defined function django_power that's registered in connect().
        if connector == '^':
            return 'django_power(%s)' % ','.join(sub_expressions)
        return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)

    def combine_duration_expression(self, connector, sub_expressions):
        """Combine duration operands via the django_format_dtdelta UDF."""
        if connector not in ['+', '-']:
            raise utils.DatabaseError('Invalid connector for timedelta: %s.' % connector)
        fn_params = ["'%s'" % connector] + sub_expressions
        if len(fn_params) > 3:
            raise ValueError('Too many params for timedelta operations.')
        return "django_format_dtdelta(%s)" % ', '.join(fn_params)

    def integer_field_range(self, internal_type):
        # SQLite doesn't enforce any integer constraints
        return (None, None)

    def subtract_temporals(self, internal_type, lhs, rhs):
        """Return SQL + params for (lhs - rhs) on temporal fields via UDFs."""
        lhs_sql, lhs_params = lhs
        rhs_sql, rhs_params = rhs
        if internal_type == 'TimeField':
            return "django_time_diff(%s, %s)" % (lhs_sql, rhs_sql), lhs_params + rhs_params
        return "django_timestamp_diff(%s, %s)" % (lhs_sql, rhs_sql), lhs_params + rhs_params
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextlib
import importlib
import logging
import os
import pathlib
import sys
import tempfile
import unittest
from unittest.mock import patch
import pytest
from parameterized import parameterized
from airflow.configuration import conf
from tests.test_utils.config import conf_vars
SETTINGS_FILE_VALID = """
LOGGING_CONFIG = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'airflow.task': {
'format': '[%%(asctime)s] {{%%(filename)s:%%(lineno)d}} %%(levelname)s - %%(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'airflow.task',
'stream': 'ext://sys.stdout'
},
'task': {
'class': 'logging.StreamHandler',
'formatter': 'airflow.task',
'stream': 'ext://sys.stdout'
},
},
'loggers': {
'airflow.task': {
'handlers': ['task'],
'level': 'INFO',
'propagate': False,
},
}
}
"""
SETTINGS_FILE_INVALID = """
LOGGING_CONFIG = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'airflow.task': {
'format': '[%%(asctime)s] {{%%(filename)s:%%(lineno)d}} %%(levelname)s - %%(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'airflow.task',
'stream': 'ext://sys.stdout'
}
},
'loggers': {
'airflow': {
'handlers': ['file.handler'], # this handler does not exists
'level': 'INFO',
'propagate': False
}
}
}
"""
SETTINGS_FILE_EMPTY = """
# Other settings here
"""
SETTINGS_DEFAULT_NAME = 'custom_airflow_local_settings'
def reset_logging():
    """Reset Logging"""
    root_manager = logging.root.manager
    root_manager.disabled = logging.NOTSET
    for logger_name, candidate in list(root_manager.loggerDict.items()):
        # Only touch airflow loggers; skip logging.PlaceHolder entries.
        if not logger_name.startswith('airflow'):
            continue
        if not isinstance(candidate, logging.Logger):
            continue
        candidate.setLevel(logging.NOTSET)
        candidate.propagate = True
        candidate.disabled = False
        candidate.filters.clear()
        for handler in list(candidate.handlers):
            # Copied from `logging.shutdown`.
            try:
                handler.acquire()
                handler.flush()
                handler.close()
            except (OSError, ValueError):
                pass
            finally:
                handler.release()
            candidate.removeHandler(handler)
@contextlib.contextmanager
def settings_context(content, directory=None, name='LOGGING_CONFIG'):
    """
    Sets a settings file and puts it in the Python classpath

    :param content:
          The content of the settings file
    :param directory: the directory
    :param name: str
    """
    initial_logging_config = os.environ.get("AIRFLOW__LOGGING__LOGGING_CONFIG_CLASS", "")
    try:
        settings_root = tempfile.mkdtemp()
        filename = f"{SETTINGS_DEFAULT_NAME}.py"
        if directory:
            # Create the directory structure with __init__.py
            dir_path = os.path.join(settings_root, directory)
            pathlib.Path(dir_path).mkdir(parents=True, exist_ok=True)

            # Drop an __init__.py at every level so the whole chain is an
            # importable package.
            basedir = settings_root
            for part in directory.split('/'):
                open(os.path.join(basedir, '__init__.py'), 'w').close()
                basedir = os.path.join(basedir, part)
            open(os.path.join(basedir, '__init__.py'), 'w').close()

            # Replace slashes by dots
            module = directory.replace('/', '.') + '.' + SETTINGS_DEFAULT_NAME + '.' + name
            settings_file = os.path.join(dir_path, filename)
        else:
            module = SETTINGS_DEFAULT_NAME + '.' + name
            settings_file = os.path.join(settings_root, filename)

        with open(settings_file, 'w') as handle:
            handle.writelines(content)
        sys.path.append(settings_root)

        # Using environment vars instead of conf_vars so value is accessible
        # to parent and child processes when using 'spawn' for multiprocessing.
        os.environ["AIRFLOW__LOGGING__LOGGING_CONFIG_CLASS"] = module
        yield settings_file

    finally:
        # NOTE(review): this restores "" (rather than unsetting) when the
        # variable was initially absent, and the temp dir is never removed —
        # confirm both are acceptable for the test suite.
        os.environ["AIRFLOW__LOGGING__LOGGING_CONFIG_CLASS"] = initial_logging_config
        sys.path.remove(settings_root)
class TestLoggingSettings(unittest.TestCase):
    """Tests for airflow.logging_config.configure_logging and its settings loading."""

    # Make sure that the configure_logging is not cached
    def setUp(self):
        # Snapshot sys.modules so tearDown can evict anything imported
        # during the test.
        self.old_modules = dict(sys.modules)

    def tearDown(self):
        # Remove any new modules imported during the test run. This lets us
        # import the same source files for more than one test.
        from airflow.config_templates import airflow_local_settings
        from airflow.logging_config import configure_logging

        for mod in list(sys.modules):
            if mod not in self.old_modules:
                del sys.modules[mod]

        reset_logging()
        importlib.reload(airflow_local_settings)
        configure_logging()

    # When we try to load an invalid config file, we expect an error
    def test_loading_invalid_local_settings(self):
        from airflow.logging_config import configure_logging, log

        with settings_context(SETTINGS_FILE_INVALID):
            with patch.object(log, 'error') as mock_info:
                # Load config
                with pytest.raises(ValueError):
                    configure_logging()

                mock_info.assert_called_once_with(
                    'Unable to load the config, contains a configuration error.'
                )

    def test_loading_valid_complex_local_settings(self):
        # Test what happens when the config is somewhere in a subfolder
        module_structure = 'etc.airflow.config'
        dir_structure = module_structure.replace('.', '/')
        with settings_context(SETTINGS_FILE_VALID, dir_structure):
            from airflow.logging_config import configure_logging, log

            with patch.object(log, 'info') as mock_info:
                configure_logging()
                mock_info.assert_called_once_with(
                    'Successfully imported user-defined logging config from %s',
                    f'etc.airflow.config.{SETTINGS_DEFAULT_NAME}.LOGGING_CONFIG',
                )

    # When we try to load a valid config
    def test_loading_valid_local_settings(self):
        with settings_context(SETTINGS_FILE_VALID):
            from airflow.logging_config import configure_logging, log

            with patch.object(log, 'info') as mock_info:
                configure_logging()
                mock_info.assert_called_once_with(
                    'Successfully imported user-defined logging config from %s',
                    f'{SETTINGS_DEFAULT_NAME}.LOGGING_CONFIG',
                )

    # When we load an empty file, it should go to default
    def test_loading_no_local_settings(self):
        with settings_context(SETTINGS_FILE_EMPTY):
            from airflow.logging_config import configure_logging

            with pytest.raises(ImportError):
                configure_logging()

    # When the key is not available in the configuration
    def test_when_the_config_key_does_not_exists(self):
        from airflow import logging_config

        with conf_vars({('logging', 'logging_config_class'): None}):
            with patch.object(logging_config.log, 'debug') as mock_debug:
                logging_config.configure_logging()
                mock_debug.assert_any_call('Could not find key logging_config_class in config')

    # Just default
    def test_loading_local_settings_without_logging_config(self):
        from airflow.logging_config import configure_logging, log

        with patch.object(log, 'debug') as mock_info:
            configure_logging()
            mock_info.assert_called_once_with('Unable to load custom logging, using default config instead')

    def test_1_9_config(self):
        # Legacy 1.9-style task_log_reader values should warn and be rewritten.
        from airflow.logging_config import configure_logging

        with conf_vars({('logging', 'task_log_reader'): 'file.task'}):
            with pytest.warns(DeprecationWarning, match=r'file.task'):
                configure_logging()
            assert conf.get('logging', 'task_log_reader') == 'task'

    def test_loading_remote_logging_with_wasb_handler(self):
        """Test if logging can be configured successfully for Azure Blob Storage"""
        from airflow.config_templates import airflow_local_settings
        from airflow.logging_config import configure_logging
        from airflow.utils.log.wasb_task_handler import WasbTaskHandler

        with conf_vars(
            {
                ('logging', 'remote_logging'): 'True',
                ('logging', 'remote_log_conn_id'): 'some_wasb',
                ('logging', 'remote_base_log_folder'): 'wasb://some-folder',
            }
        ):
            importlib.reload(airflow_local_settings)
            configure_logging()

        logger = logging.getLogger('airflow.task')
        assert isinstance(logger.handlers[0], WasbTaskHandler)

    @parameterized.expand(
        [
            (
                'cloudwatch://arn:aws:logs:aaaa:bbbbb:log-group:ccccc',
                'arn:aws:logs:aaaa:bbbbb:log-group:ccccc',
            ),
            (
                'cloudwatch://arn:aws:logs:aaaa:bbbbb:log-group:aws/ccccc',
                'arn:aws:logs:aaaa:bbbbb:log-group:aws/ccccc',
            ),
            (
                'cloudwatch://arn:aws:logs:aaaa:bbbbb:log-group:/aws/ecs/ccccc',
                'arn:aws:logs:aaaa:bbbbb:log-group:/aws/ecs/ccccc',
            ),
        ]
    )
    def test_log_group_arns_remote_logging_with_cloudwatch_handler(
        self, remote_base_log_folder, log_group_arn
    ):
        """Test if the correct ARNs are configured for Cloudwatch"""
        from airflow.config_templates import airflow_local_settings
        from airflow.logging_config import configure_logging

        with conf_vars(
            {
                ('logging', 'remote_logging'): 'True',
                ('logging', 'remote_log_conn_id'): 'some_cloudwatch',
                ('logging', 'remote_base_log_folder'): remote_base_log_folder,
            }
        ):
            importlib.reload(airflow_local_settings)
            configure_logging()
            assert (
                airflow_local_settings.DEFAULT_LOGGING_CONFIG['handlers']['task']['log_group_arn']
                == log_group_arn
            )
| |
# ==============================================================================
# Copyright 2019 - Philip Paquette
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" Queue Dataset
- Class responsible for putting entries in a queue and feeding them to the model through tf.data.dataset
"""
import logging
from queue import Queue
import time
from threading import Thread
import uuid
import numpy as np
from tornado.concurrent import Future
from diplomacy_research.models.datasets.base_builder import VarProtoField
from diplomacy_research.models.datasets.feedable_dataset import FeedableDataset
from diplomacy_research.utils.cluster import get_current_io_loop, PrefetchedItem
from diplomacy_research.settings import SESSION_RUN_TIMEOUT
# Constants
LOGGER = logging.getLogger(__name__)
RUNNING, PAUSED, CLOSED = 'RUNNING', 'PAUSED', 'CLOSED'
FILLING_QUEUE = '<filling>'
# ----------------------------------------------------------------------------
# ---------- THREADS METHODS ----------------
# ----------------------------------------------------------------------------
def run_single_or_list_callable(callable_or_list, *args, **kwargs):
    """ Calls a single callable or a list of callable and returns a list of results
        :param callable_or_list: A single callable or a list of callables
    """
    # Normalize to a list, then invoke each callable with the same arguments.
    callables = callable_or_list if isinstance(callable_or_list, list) else [callable_or_list]
    return [func(*args, **kwargs) for func in callables]
def process_queues(dataset, in_main_thread=False):
    """ This method will be launched in a separate thread and is in charge of rotating through the queues
        :param dataset: The instantiated feedable dataset
        :param in_main_thread: Boolean that indicates that we are running in the main Python thread.
        :type dataset: QueueDataset
    """
    from diplomacy_research.utils.tensorflow import tf
    assert dataset.session is not None, 'Error - The dataset must have a session object attached to it'
    # Cap each session.run so a stalled queue can't block the loop forever.
    def_options = tf.RunOptions(timeout_in_ms=SESSION_RUN_TIMEOUT)

    while not dataset.all_threads_closing:
        all_queues_are_empty = True

        # Thread is paused
        if dataset.all_threads_paused and not in_main_thread:
            dataset.thread_status['process_queues'] = PAUSED
            while dataset.all_threads_paused and not dataset.all_threads_closing:
                time.sleep(0.1)

        # Thread resumed / started
        if not in_main_thread:
            dataset.thread_status['process_queues'] = RUNNING

        # Filling queues - Pausing
        if dataset.last_queue == FILLING_QUEUE:
            time.sleep(0.1)
            continue

        # Looping through every queue
        # list() snapshots the keys so new queues added concurrently don't
        # break iteration.
        for queue_name in list(dataset.feedable_queues):
            queue = dataset.feedable_queues[queue_name]['queue']
            outputs = dataset.feedable_queues[queue_name]['outputs']
            placeholders = dataset.feedable_queues[queue_name]['placeholders']
            with_status = dataset.feedable_queues[queue_name]['with_status']
            pre_condition_hook = dataset.feedable_queues[queue_name]['hooks'].get('pre_condition', None)
            pre_run_hook = dataset.feedable_queues[queue_name]['hooks'].get('pre_run', None)
            post_run_hook = dataset.feedable_queues[queue_name]['hooks'].get('post_run', None)
            pre_queue_hook = dataset.feedable_queues[queue_name]['hooks'].get('pre_queue', None)
            post_queue_hook = dataset.feedable_queues[queue_name]['hooks'].get('post_queue', None)
            queue_size = queue.qsize()

            # [Hook] Pre-Condition
            # Any hook returning False skips this queue for this rotation.
            if pre_condition_hook is not None:
                if False in run_single_or_list_callable(pre_condition_hook, dataset):
                    continue

            # Setting this queue as active
            if queue_size:
                all_queues_are_empty = False
                dataset.active_queue = queue_name
                dataset.nb_items_to_pull_from_queue = queue_size

                # [Hook] Pre-Queue
                if pre_queue_hook is not None:
                    run_single_or_list_callable(pre_queue_hook, dataset)

                # Initializing iterator
                dataset.initialize(dataset.session)
                remaining_dequeues = dataset.nb_items_to_pull_from_queue

                # Processing queue
                # results[0] is the batch of request ids; its first dimension
                # is the number of items consumed by that run.
                while remaining_dequeues > 0 and (dataset.nb_items_to_pull_from_queue > 0 or not dataset.is_done):
                    try:
                        # [Hook] Pre-Run
                        if pre_run_hook is not None:
                            run_single_or_list_callable(pre_run_hook, dataset)

                        results = dataset.session.run(outputs, feed_dict=placeholders, options=def_options)
                        dataset.model_results += [results]
                        remaining_dequeues -= results[0].shape[0]

                        # Status message
                        if with_status:
                            nb_items = results[0].shape[0]
                            LOGGER.info('[%s] Processed %d items. Remaining: %d/%d',
                                        queue_name, nb_items, remaining_dequeues, queue_size)

                        # [Hook] Post-Run
                        if post_run_hook is not None:
                            run_single_or_list_callable(post_run_hook, dataset)

                    except (tf.errors.UnavailableError, tf.errors.AbortedError) as err:
                        # Fatal session errors: surface to the caller.
                        LOGGER.warning('Received a fatal error on queue %s', queue_name)
                        raise err

                    except tf.errors.OutOfRangeError:
                        # Iterator exhausted for this queue; move on.
                        pass

                # [Hook] Post-Queue
                if post_queue_hook is not None:
                    run_single_or_list_callable(post_queue_hook, dataset)

        # Processing results in main thread
        if in_main_thread:
            process_results(dataset, in_main_thread=in_main_thread)

        # Sleeping if all queues were empty, or exiting if in main thread.
        if all_queues_are_empty:
            if in_main_thread:
                break
            time.sleep(0.1)

    # Exiting
    if not in_main_thread:
        dataset.thread_status['process_queues'] = CLOSED
def process_results(dataset, in_main_thread=False):
    """ This method will be launched in a separate thread and is in charge of setting the results on the calling
        future objects, so that the method that has put an object in the queue knows that its results are ready.
        :param dataset: The instantiated feedable dataset
        :param in_main_thread: Boolean that indicates that we are running in the main Python thread.
        :type dataset: QueueDataset
    """
    while not dataset.all_threads_closing:
        # Thread is paused - report PAUSED and spin until resumed or asked to close.
        # When running in the main thread we never pause (pause/resume only applies to worker threads).
        if dataset.all_threads_paused and not in_main_thread:
            dataset.thread_status['process_results'] = PAUSED
            while dataset.all_threads_paused and not dataset.all_threads_closing:
                time.sleep(0.1)
        # Thread resumed / started - report RUNNING (only meaningful for the worker thread)
        if not in_main_thread:
            dataset.thread_status['process_results'] = RUNNING
        # No items in results queue, we can sleep, or exit if in main thread.
        if not dataset.model_results:
            if in_main_thread:
                break
            time.sleep(0.1)
            continue
        # Processing all items in the results queue
        # The first item of the results is always the request_id
        # NOTE: model_results is a plain list shared with process_queues(); pop(0) is assumed safe
        # because only this function consumes it -- TODO confirm single-consumer invariant.
        while dataset.model_results:
            results = dataset.model_results.pop(0)
            request_ids, output_results = results[0], results[1:]
            nb_results = request_ids.shape[0]
            # Determining if we have a queue with results
            # As opposed to an operation queue that doesn't return tensors
            # If the output results are a tuple (as opposed to a list for each item)
            # We return the tuple to each item in the batch (i.e. results are shared)
            if len(results) == 1:
                has_results = False
            elif isinstance(output_results[0], np.ndarray):
                # Batched ndarray output: first axis must match the number of request ids
                has_results = bool(output_results[0].shape and output_results[0].shape[0] == nb_results)
            elif isinstance(output_results[0], list):
                has_results = len(output_results[0]) == nb_results
            elif isinstance(output_results, list) and output_results:
                # Shared (non-batched) outputs: replicate each output to every item in the batch
                output_results = [[result] * nb_results for result in output_results]
                has_results = True
            else:
                has_results = False
            # Processing each request id
            for result_ix in range(nb_results):
                this_request_id = request_ids[result_ix].decode('utf-8')
                this_result = [result[result_ix] for result in output_results] if has_results else None
                # Unknown request_id, skipping
                if this_request_id not in dataset.futures_ioloop:
                    continue
                # Otherwise marking the future as completed
                # Deleting the entry *before* completing the future, so the id can't be resolved twice
                future, io_loop = dataset.futures_ioloop[this_request_id]
                del dataset.futures_ioloop[this_request_id]
                # set_result must run on the caller's IO loop thread, hence call_soon_threadsafe
                io_loop.asyncio_loop.call_soon_threadsafe(future.set_result, this_result)
    # Exiting
    if not in_main_thread:
        dataset.thread_status['process_results'] = CLOSED
# ----------------------------------------------------------------------------
# ---------- DATASET ----------------
# ----------------------------------------------------------------------------
class QueueDataset(FeedableDataset):
    """ This object is responsible for generating entries to feed the model (using the tf.data.dataset API)

        Items are put in named in-memory queues (each with a unique request_id), batched by a
        generator-backed tf.data.Dataset, run through the model by the "process_queues" thread,
        and their results dispatched back to the callers' futures by the "process_results" thread.
    """

    def __init__(self, batch_size, dataset_builder, cluster_config=None, no_iterator=False):
        """ Constructor
            :param batch_size: The size of a batch per tower
            :param dataset_builder: An instance of `BaseBuilder` containing the proto-fields and generation methods
            :param cluster_config: Optional. If set, the cluster configuration will be used for distributed training.
            :param no_iterator: Boolean flag that indicates to not create an iterator (it will be loaded from a ckpt)
            :type dataset_builder: diplomacy_research.models.datasets.base_builder.BaseBuilder
            :type cluster_config: diplomacy_research.utils.cluster.ClusterConfig
        """
        super(QueueDataset, self).__init__(dataset_builder=dataset_builder,
                                           cluster_config=cluster_config)
        self.batch_size = batch_size
        self.no_iterator = no_iterator
        self.tf_dataset = None

        # Creating iterator with init ops
        self.init_op = None
        self.output_features = None                 # This represents iterator.get_next()

        # Feedable queues
        self.feedable_queues = {}
        self.active_queue = None
        self.nb_items_to_pull_from_queue = 0
        self.model_results = []
        self.futures_ioloop = {}                    # Contains tuple (future, original thread io_loop)
        self._dataset_is_done = False
        self.last_queue = ''

        # Threads
        self.threads = {}
        self.thread_status = {'process_queues': CLOSED, 'process_results': CLOSED}
        self.all_threads_paused = False
        self.all_threads_closing = True             # Threads are not running until start_threads()

        # Building the dataset
        self.build()

    @property
    def can_support_iterator(self):
        """ Determines if the dataset can support an iterator or if it is a remote (RPC) dataset """
        return True

    @property
    def is_done(self):
        """ Returns True if the end of file has been reached """
        return self._dataset_is_done

    def mark_as_done(self):
        """ Marks the dataset as having reached the end of the file """
        self._dataset_is_done = True

    def build(self):
        """ Builds the TensorFlow dataset (generator-backed) and optionally its iterator """
        from diplomacy_research.utils.tensorflow import tf, np_to_tf
        assert 'request_id' in self.proto_fields, 'You need to have a "request_id" field.'

        def feedable_generator():
            """ Generator that feeds data into the feedable_dataset
                When this function exits/returns, a tf.errors.OutOfRangeError is triggered
            """
            while True:
                next_batch = self.get_next_feedable_batch()
                if next_batch is None:
                    self.mark_as_done()
                    break
                yield next_batch

        # Padding output shapes with None (batch dimension)
        output_types = self.dataset_builder.output_types
        output_shapes = self.dataset_builder.output_shapes
        output_shapes = {key: [None] + list(shape) for key, shape in output_shapes.items()}

        # Building a list of generic default values from the output types and output shapes
        for feature_name, feature_shape in output_shapes.items():
            # np.object was a deprecated alias for the builtin `object` (removed in NumPy 1.24);
            # the dtype comparison is unchanged.
            if output_types[feature_name] == object:
                self.default_features[feature_name] = bytes('', 'utf-8')
            elif isinstance(self.proto_fields[feature_name], VarProtoField):
                self.default_features[feature_name] = np.array([], dtype=output_types[feature_name])
            else:
                self.default_features[feature_name] = np.zeros(shape=feature_shape[1:],
                                                               dtype=output_types[feature_name])

        # Creates dataset
        tf_output_types = {key: np_to_tf(dtype) for key, dtype in output_types.items()}
        tf_output_shapes = {key: tf.TensorShape(shape) for key, shape in output_shapes.items()}
        self.tf_dataset = tf.data.Dataset.from_generator(feedable_generator,
                                                         output_types=tf_output_types,
                                                         output_shapes=tf_output_shapes)
        self.tf_dataset = self.tf_dataset.prefetch(1)

        # Creating iterator (with a new iterator_resource), unless specified otherwise
        if not self.no_iterator:
            self.create_iterator()

    def start(self, session):
        """ Starts the dataset
            :param session: The TensorFlow session to use.
            :type session: tensorflow.python.client.session.Session
        """
        if self.is_started:
            LOGGER.error('Dataset was already started. Not re-starting it.')
            return
        self.session = session
        self.initialize(session)
        self.start_threads()
        self._is_started = True

    def restart(self, session):
        """ Restarts the threads using a (new) session object
            :param session: The TensorFlow session to use.
            :type session: tensorflow.python.client.session.Session
        """
        if not self._is_started:
            self.start(session)
            return
        self.session = session
        self.start_threads()

    def run(self):
        """ Run process queues on the current thread - This is to use the session recoverability
            Note: threads needs to be paused before calling this method
        """
        if not self.all_threads_paused:
            LOGGER.warning('You must pause the threads before calling run(). Aborting.')
            return
        process_queues(self, in_main_thread=True)

    def start_threads(self):
        """ (Re)-starts the "process_queues" and "process_results" daemon threads """
        if self.thread_status['process_queues'] != CLOSED or self.thread_status['process_results'] != CLOSED:
            self.stop_threads()
        self.all_threads_paused = False
        self.all_threads_closing = False
        self.threads['process_queues'] = Thread(target=process_queues, args=(self,), daemon=True)
        self.threads['process_results'] = Thread(target=process_results, args=(self,), daemon=True)
        self.threads['process_queues'].start()
        self.threads['process_results'].start()

    def stop_threads(self):
        """ Stops the threads and waits for termination """
        self.all_threads_paused = False
        self.all_threads_closing = True
        # Using .get() - the threads may never have been started (e.g. close() before start()),
        # in which case self.threads is still an empty dict and direct indexing would raise KeyError.
        if self.threads.get('process_queues') is not None:
            self.threads['process_queues'].join()
            self.threads['process_queues'] = None
            self.thread_status['process_queues'] = CLOSED
        if self.threads.get('process_results') is not None:
            self.threads['process_results'].join()
            self.threads['process_results'] = None
            self.thread_status['process_results'] = CLOSED

    def pause_threads(self):
        """ Pauses all running threads and waits for them to be paused (up to a 60 seconds timeout) """
        self.all_threads_paused = True
        self.all_threads_closing = False
        start_time = int(time.time())
        current_time = start_time

        # Waiting for threads to pause, giving up after 60 seconds.
        # Bug fix: the original condition "... or current_time - start_time > 60" kept the loop
        # spinning forever once the timeout was exceeded; the timeout must be ANDed as an upper bound.
        while ((self.thread_status['process_queues'] == RUNNING
                or self.thread_status['process_results'] == RUNNING)
               and current_time - start_time <= 60):
            time.sleep(1.)
            current_time = int(time.time())

    def resume_threads(self):
        """ Resumes all running threads and waits for them to be resumed (up to a 60 seconds timeout) """
        self.all_threads_paused = False
        self.all_threads_closing = False
        start_time = int(time.time())
        current_time = start_time

        # Waiting for threads to resume, giving up after 60 seconds.
        # Bug fix: same inverted timeout condition as pause_threads (see above).
        while ((self.thread_status['process_queues'] == PAUSED
                or self.thread_status['process_results'] == PAUSED)
               and current_time - start_time <= 60):
            time.sleep(1.)
            current_time = int(time.time())

    def initialize(self, session):
        """ Initializes the dataset (and its iterator)
            :param session: The TensorFlow session to use.
            :type session: tensorflow.python.client.session.Session
        """
        # We haven't created an iterator yet
        if self.iterator is None:
            return

        # Running init_op
        # If session is wrapped (e.g. MonitoredSession), executing it without hooks
        if hasattr(session, 'run_step_fn'):
            session.run_step_fn(lambda step_context: step_context.session.run(self.init_op))
        else:
            session.run(self.init_op)
        self._is_initialized = True
        self._dataset_is_done = False

    def create_iterator(self, iterator_resource=None, shared_name=None, features=None):
        """ Creates an iterator object (optionally using a shared name and a specific iterator resource)
            :param iterator_resource: A tf.resource scalar tf.Tensor representing the iterator.
            :param shared_name: Optional. If non-empty, this iterator will be shared under the given name across
                                multiple sessions that share the same devices (e.g. when using a remote server).
            :param features: If an iterator_resource is specified, this corresponds to the output of iterator.get_next()
            :return: Nothing, but sets the self.iterator, self.features, and dataset init_ops
        """
        if iterator_resource is not None and not self.no_iterator:
            LOGGER.error('An iterator resource can only be set if the dataset was created with the "no_iterator" flag.')
            raise RuntimeError("Cannot create new iterator")
        if iterator_resource is not None and features is None:
            LOGGER.error('The iterator features are required when reloading a saved iterator.')
            raise ValueError()

        # Loading Tensorflow
        from diplomacy_research.utils.tensorflow import tf
        output_types = self.tf_dataset.output_types
        output_shapes = self.tf_dataset.output_shapes
        output_classes = {key: tf.Tensor for key in output_types}

        # Making sure iterator is on the right device/worker
        with tf.device(self.cluster_config.iterator_device if self.cluster_config else None):

            # We have an iterator resource, so we use it
            if iterator_resource is not None:
                self.iterator = tf.data.Iterator(iterator_resource=iterator_resource,
                                                 initializer=None,
                                                 output_types=output_types,
                                                 output_shapes=output_shapes,
                                                 output_classes=output_classes)
                if features:
                    self.output_features = features

            # Otherwise, we create a brand new iterator
            else:
                self.iterator = tf.data.Iterator.from_structure(output_types,
                                                                output_shapes,
                                                                shared_name=shared_name)
                self.output_features = self.iterator.get_next()

        # Generating init op
        self._is_initialized = False
        self.init_op = self.iterator.make_initializer(self.tf_dataset)

    def create_queue(self, queue_name, outputs, default_values=None, placeholders=None, *,
                     pre_condition=None, pre_queue=None, post_queue=None, pre_run=None, post_run=None,
                     with_status=False):
        """ Creates a new feedable queue
            :param queue_name: The name of the queue to add
            :param outputs: A list of outputs the model needs to run and return for this queue
            :param default_values: A dictionary of default values that will be added to new items in the queue
            :param placeholders: A feed dict of placeholders to automatically feed when processing the queue
            :param pre_condition: [Hook] Callable or list of callables. Args: (dataset)
                                  Is run before selecting the current queue. Must return True for all callables,
                                  otherwise the queue is skipped.
            :param pre_queue: [Hook] Callable or list of callables. Args: (dataset).
                              This hook is run after the queue has been selected, but before any session.run.
            :param post_queue: [Hook] Callable or list of callables. Args: (dataset).
                               This hook is run after all session.run for a given queue.
            :param pre_run: [Hook] Callable or list of callables. Args: (dataset)
                            This hook is run before each session.run
            :param post_run: [Hook] Callable or list of callables. Args: (dataset)
                             This hook is run after each session.run
            :param with_status: Boolean that indicates that we need to display a status message after session.run()
            :return: Nothing
        """
        if self.has_queue(queue_name):
            LOGGER.warning('The feedable queue "%s" has already been defined. Please choose a new queue name.',
                           queue_name)
            return

        # Creating queue - the request_id feature is always prepended to the outputs so that
        # process_results() can route results back to the right futures.
        self.feedable_queues[queue_name] = {'queue': Queue(),
                                            'outputs': [self.output_features['request_id']] + list(outputs),
                                            'default_values': default_values or {},
                                            'placeholders': placeholders or {},
                                            'with_status': with_status,
                                            'hooks': {
                                                'pre_condition': pre_condition,
                                                'pre_queue': pre_queue,
                                                'post_queue': post_queue,
                                                'pre_run': pre_run,
                                                'post_run': post_run
                                            }}

    def has_queue(self, queue_name):
        """ Determines if the feedable dataset already has a queue with the specified name """
        return queue_name in self.feedable_queues

    def get_results(self, queue_name, item, retry_on_failure=True, **kwargs):
        """ Computes the outputs of a name using item as input
            :param queue_name: The name of the queue where to put the item
            :param item: A dictionary with the fields required for that queue
            :param retry_on_failure: Boolean that indicates to retry querying from the model if an error is encountered.
            :param kwargs: Additional optional kwargs:
                - prefetch: Boolean that indicates to return a PrefetchedItem, otherwise returns a future.
            :return: Either:
                - if not prefetch, a Future that will be set with the results when they become available
                - if prefetch, a PrefetchedItem that can be put in the queue.
        """
        if not self.has_queue(queue_name):
            LOGGER.warning('The queue "%s" could not be found.', queue_name)
            # Bug fix: Future().set_result(None) returns None (set_result's return value),
            # so the original code returned None instead of a completed Future.
            completed_future = Future()
            completed_future.set_result(None)
            return completed_future
        if not isinstance(item, dict):
            LOGGER.warning('The item object passed to get_results must be a dictionary.')
            completed_future = Future()
            completed_future.set_result(None)
            return completed_future

        request_id = str(uuid.uuid4())
        item['request_id'] = bytes(request_id, 'utf-8')
        item = self.prepare_item(item)

        # Adding default values provided
        for default_key, default_val in self.feedable_queues[queue_name]['default_values'].items():
            if default_key not in item:
                item[default_key] = default_val

        # Adding generic default values (all zeros)
        for default_key, default_val in self.default_features.items():
            if default_key not in item:
                item[default_key] = default_val

        # Prefetching - We return a prefetched item
        if kwargs.get('prefetch', False):
            return PrefetchedItem(queue_name, item)

        # Otherwise, we put the item in the queue and return a future
        return self.put_item_in_queue(queue_name, item)

    def put_item_in_queue(self, queue_name, item):
        """ Puts an item in the queue, so that it can be processed by a TensorFlow session.
            :param queue_name: The name of the queue where to put the item
            :param item: A dictionary with the fields required for that queue.
            :return: A Future that will be set with the results when they become available
        """
        item_future = Future()
        request_id = item['request_id'].decode('utf-8')
        # Recording the caller's IO loop, so results can be set back on the right thread
        self.futures_ioloop[request_id] = (item_future, get_current_io_loop())
        self.feedable_queues[queue_name]['queue'].put_nowait(item)
        return item_future

    def get_next_feedable_batch(self):
        """ Returns the next feedable batch in the active queue, None otherwise
            The batch is a dictionary with feature names as key, and list of numpy arrays as values
        """
        if not self.active_queue \
                or not self.has_queue(self.active_queue) \
                or self.nb_items_to_pull_from_queue <= 0:
            return None

        # Building batch
        max_items_in_batch = min(self.nb_items_to_pull_from_queue, self.batch_size)
        batch = {key: [] for key in self.output_features.keys()}
        nb_items_in_batch = 0
        max_var_len = {key: 0 for key in self.proto_fields if isinstance(self.proto_fields[key], VarProtoField)}

        # Building batch - No padding yet
        while nb_items_in_batch < max_items_in_batch:
            next_batch_of_data = self.feedable_queues[self.active_queue]['queue'].get()
            for key in self.output_features.keys():
                batch[key] += [next_batch_of_data[key]]
                if key in max_var_len:
                    max_var_len[key] = max(max_var_len[key], len(next_batch_of_data[key]))
            nb_items_in_batch += 1

        # Padding all var len features to the maximum length in batch
        for padded_feature in max_var_len:
            for item_ix in range(nb_items_in_batch):
                current_len = len(batch[padded_feature][item_ix])
                max_len = max_var_len[padded_feature]
                if current_len < max_len:
                    batch[padded_feature][item_ix] = np.pad(batch[padded_feature][item_ix],
                                                            (0, max_len - current_len),
                                                            mode='constant')

        # Sending to generator
        self.nb_items_to_pull_from_queue -= nb_items_in_batch
        return batch

    def make_session_run_hook(self):
        """ Builds a SessionRunHook for the MonitoredTrainingSession object """
        from diplomacy_research.utils.tensorflow import QueueDatasetSessionRunHook
        return QueueDatasetSessionRunHook(self)

    def close(self):
        """ Stops iterating the dataset """
        self._is_closing = True
        self.tf_dataset = None
        self.stop_threads()
| |
############################################################################
##
## Copyright (C) 2006-2007 University of Utah. All rights reserved.
##
## This file is part of VisTrails.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following to ensure GNU General Public
## Licensing requirements will be met:
## http://www.opensource.org/licenses/gpl-license.php
##
## If you are unsure which license is appropriate for your use (for
## instance, you are interested in developing a commercial derivative
## of VisTrails), please contact us at contact@vistrails.org.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
############################################################################
import core.modules
import core.modules.module_registry
from core.modules.vistrails_module import Module, ModuleError
import vcs, cdms, cdutil
import inspect
import os, sys
# Thin VisTrails wrapper around a cdms data array so it can travel between module ports.
class cdms_dataarray(Module) :
    def __init__(self, arr) :
        # NOTE(review): Module.__init__ is not called here (unlike vcs_canvas below) --
        # confirm the VisTrails Module base class tolerates this for pure data carriers.
        self.arr = arr  # the wrapped cdms variable / data array
class cdms_dataset(Module) :
    """ VisTrails wrapper around an opened cdms dataset (the object returned by cdms.open). """
    def __init__(self, dataset) :
        # NOTE(review): Module.__init__ is intentionally not called, matching the other
        # data-carrier wrappers in this file -- confirm the base class tolerates this.
        self.dataset = dataset  # the wrapped cdms file/dataset object
    def slice(self, id, kwargs) :
        """ Read variable `id` from the dataset, forwarding `kwargs` (a plain dict) as
            the cdms selection keywords (e.g. time/level/latitude ranges).
        """
        # The original expanded an always-empty positional list (*dummy), which was a no-op.
        return self.dataset(id, **kwargs)
class cdms_get_data(Module) :
def compute(self) :
if not self.has_input('id') :
print "Error: must have id input"
return
if not self.has_input('dataset') :
print "Error: must have dataset input"
return
id = self.get_input('id')
dataset = self.get_input('dataset')
kwargs = {}
if (self.has_input('arg_key_1') and
self.has_input('arg_val_1')) :
k = self.get_input('arg_key_1')
t = self.get_input('arg_val_1')
kwargs[k] = t
if (self.has_input('arg_key_2') and
self.has_input('arg_val_2')) :
k = self.get_input('arg_key_2')
t = self.get_input('arg_val_2')
kwargs[k] = t
if (self.has_input('arg_key_3') and
self.has_input('arg_val_3')) :
k = self.get_input('arg_key_3')
t = self.get_input('arg_val_3')
kwargs[k] = t
dummy = []
arr = dataset.slice(id, kwargs)
darr = cdms_dataarray(arr)
self.set_output("cdms_dataarray", darr)
class cdms_open(Module) :
def compute(self) :
args = inspect.getargspec(cdms.open)
def_args = args[3]
uri = None
mode = def_args[0]
template = def_args[1]
dods = def_args[2]
if not self.has_input('uri') :
print "Error: must have uri input"
return
if self.has_input('uri') :
inuri = self.get_input('uri')
uri = os.path.join(sys.prefix, inuri)
if self.has_input('mode') :
mode = self.get_input('mode')
if self.has_input('template') :
template = self.get_input('template')
if self.has_input('dods') :
dods = self.get_input('dods')
# output the cdmsfile object.
cdmsfile = cdms.open(uri,mode,template,dods)
output = cdms_dataset(cdmsfile)
self.set_output("cdms_dataset", output)
# just wrap whatever so it can be sent downstream
class generic_port(Module) :
    def __init__(self, data) :
        # NOTE(review): Module.__init__ is not called here -- confirm the VisTrails
        # Module base class tolerates this for pure data carriers.
        self.data = data  # arbitrary payload forwarded between modules
class vcs_canvas(Module) :
def __init__(self) :
Module.__init__(self)
self.canvas = None
def compute(self):
if self.canvas == None :
print "calling vcs.init()"
self.canvas = vcs.init()
self.set_output("vcs_canvas", self)
class vcs_canvas_getboxfill(Module) :
def compute(self) :
if not self.has_input('canvas') :
print "ERROR: Must have canvas input port"
return
bname = None
if self.has_input('boxfill name') :
bname = self.get_input('boxfill name')
vcs_c = self.get_input('canvas')
if bname == None :
bfm = vcs_c.canvas.getboxfill()
else :
bfm = vcs_c.canvas.getboxfill(bname)
out = generic_port(bfm)
self.set_output("boxfill graphics method", out)
class vcs_canvas_gettemplate(Module) :
def compute(self) :
if not self.has_input('canvas') :
print "ERROR: Must have canvas input port"
return
tname = None
if self.has_input('template name') :
tname = self.get_input('template name')
vcs_c = self.get_input('canvas')
if tname == None :
t = vcs_c.canvas.gettemplate()
else :
t = vcs_c.canvas.gettemplate(tname)
out = generic_port(t)
self.set_output("template", out)
class vcs_canvas_plot(Module) :
    """ Plots one or two data arrays on a vcs canvas and outputs the rendered image.

        Required input ports: 'vcs_canvas', 'array1', 'graphics_method', 'template_name'.
        Optional ports: 'array2' plus the vcs plot keyword arguments listed in
        PLOT_KWARG_PORTS below.
        Output port: 'image' (a rendered .cdat.gif file).
    """

    # Optional ports forwarded verbatim as canvas.plot(...) keyword arguments.
    # (Replaces 35 copy-pasted has_input/get_input blocks.)
    PLOT_KWARG_PORTS = ('ratio',
                        # variable attribute keys
                        'comment1', 'comment2', 'comment3', 'comment4',
                        'file_comment', 'hms', 'long_name', 'name',
                        'time', 'units', 'ymd',
                        # dimension attribute keys
                        'xarray1', 'yarray1', 'zarray1', 'tarray1', 'warray1',
                        'xarray2', 'yarray2', 'zarray2', 'tarray2', 'warray2',
                        'xbounds', 'ybounds',
                        'xname', 'yname', 'zname', 'tname', 'wname',
                        'xunits', 'yunits', 'zunits', 'tunits', 'wunits',
                        'xweights', 'yweights')

    def compute(self) :
        canvas = self.get_input('vcs_canvas').canvas
        data1 = self.get_input('array1').arr
        data2 = None
        if self.has_input('array2') :
            data2 = self.get_input('array2').arr
        gm = self.get_input('graphics_method').data
        t = self.get_input('template_name').data
        # build up the keyword arguments from the optional inputs.
        kwargs = {}
        kwargs['bg'] = 1            # always render in background mode
        # NOTE(review): like the original, the dimension-array ports are forwarded as
        # received (not unwrapped via .arr) -- confirm vcs accepts the wrapper objects.
        for port in self.PLOT_KWARG_PORTS :
            if self.has_input(port) :
                kwargs[port] = self.get_input(port)
        # Identity check ("is None") instead of "== None" (PEP 8)
        if data2 is None :
            canvas.plot(data1, gm, t, **kwargs)
        else :
            canvas.plot(data1, data2, gm, t, **kwargs)
        o = self.interpreter.filePool.create_file(suffix='.cdat.gif')
        canvas.gif(o.name)
        self.set_output("image", o)
def initialize(*args, **keywords):
    """ Registers every cdms/vcs module defined in this file, together with its
        input and output ports, against the VisTrails module registry.
        Registration order matches the original hand-written sequence.
    """
    reg = core.modules.module_registry
    basic = core.modules.basic_modules

    # --- cdms_open --------------------------------------------------------
    reg.addModule(cdms_open)
    for port_name in ('dods', 'template', 'mode', 'uri'):
        reg.addInputPort(cdms_open, port_name, (basic.String, 'tip'))
    # output dataset.
    reg.addOutputPort(cdms_open, "cdms_dataset", (cdms_dataset, 'cdms.dataset'))

    # holds the cdms dataset object, and acts as a port.
    reg.addModule(cdms_dataset)

    # --- cdms_get_data ----------------------------------------------------
    reg.addModule(cdms_get_data)
    reg.addInputPort(cdms_get_data, 'dataset', (cdms_dataset, 'dataset'))
    reg.addInputPort(cdms_get_data, 'id', (basic.String, 'dataset id'))
    for ix in (1, 2, 3):
        reg.addInputPort(cdms_get_data, 'arg_key_%d' % ix,
                         (basic.String, 'argument %d key' % ix))
        # NOTE(review): reproduced verbatim from the original -- the port type list
        # contains Float twice; possibly one entry was meant to be another type.
        reg.addInputPort(cdms_get_data, 'arg_val_%d' % ix,
                         ([basic.Float, basic.Float]))
    # output data array.
    reg.addOutputPort(cdms_get_data, "cdms_dataarray", (cdms_dataarray, 'data'))

    # --- data-carrier modules --------------------------------------------
    reg.addModule(cdms_dataarray)       # port for passing data arrays
    reg.addModule(generic_port)         # port for passing anything

    # --- canvas module and its method wrappers ---------------------------
    reg.addModule(vcs_canvas)
    reg.addOutputPort(vcs_canvas, "vcs_canvas", (vcs_canvas, 'the canvas'))
    reg.addModule(vcs_canvas_getboxfill)
    reg.addInputPort(vcs_canvas_getboxfill, 'canvas', (vcs_canvas, 'the canvas'))
    reg.addInputPort(vcs_canvas_getboxfill, 'boxfill name',
                     (basic.String, 'boxfill method name'))
    reg.addOutputPort(vcs_canvas_getboxfill, "boxfill graphics method",
                      (generic_port, 'graphics method'))
    reg.addModule(vcs_canvas_gettemplate)
    reg.addInputPort(vcs_canvas_gettemplate, 'canvas', (vcs_canvas, 'the canvas'))
    reg.addInputPort(vcs_canvas_gettemplate, 'template name',
                     (basic.String, 'template name'))
    reg.addOutputPort(vcs_canvas_gettemplate, "template", (generic_port, 'template'))

    # --- vcs_canvas_plot --------------------------------------------------
    reg.addModule(vcs_canvas_plot)
    for port_name, port_type in (('vcs_canvas', vcs_canvas),
                                 ('array1', cdms_dataarray),
                                 ('array2', cdms_dataarray),
                                 ('template_name', generic_port),
                                 ('graphics_method', generic_port),
                                 ('graphics_name', generic_port)):
        reg.addInputPort(vcs_canvas_plot, port_name, port_type)

    # Optional ports (registered with optional=True), in the original order.
    S = basic.String
    A = cdms_dataarray
    optional_ports = (
        # keyword args
        ('ratio', S),
        # Variable attribute keys
        ('comment1', S), ('comment2', S), ('comment3', S), ('comment4', S),
        ('file_comment', S), ('hms', S), ('long_name', S), ('name', S),
        ('time', S), ('units', S), ('ymd', S),
        # Dimension attribute keys
        ('xarray1', A), ('yarray1', A), ('zarray1', A), ('tarray1', A), ('warray1', A),
        ('xarray2', A), ('yarray2', A), ('zarray2', A), ('tarray2', A), ('warray2', A),
        ('xbounds', A), ('ybounds', A),
        ('xname', S), ('yname', S), ('zname', S), ('tname', S), ('wname', S),
        ('xunits', S), ('yunits', S), ('zunits', S), ('tunits', S), ('wunits', S),
        ('xweights', A), ('yweights', A))
    for port_name, port_type in optional_ports:
        reg.addInputPort(vcs_canvas_plot, port_name, port_type, True)

    reg.addOutputPort(vcs_canvas_plot, "image",
                      (basic.File, 'rendered image'))
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import httplib2
import json
from lxml import etree
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.abspath(__file__),
'..', '..', '..', '..', 'keystone')))
import unittest
import test_common as utils
class TenantTest(unittest.TestCase):
    """ Base fixture for the tenant API tests.

        setUp creates a test tenant plus a user bound to it (through the shared
        test_common helpers) and collects the various tokens the subclasses need;
        tearDown removes the user and tenant again.
    """
    def setUp(self):
        # Fixture identifiers and tokens pulled from the shared test helpers
        self.tenant = 'test_tenant'
        self.auth_token = utils.get_auth_token()
        self.user = utils.get_user()
        self.userdisabled = utils.get_userdisabled()
        self.exp_auth_token = utils.get_exp_auth_token()
        self.disabled_token = utils.get_disabled_token()
        # Order matters: the tenant must exist before the user can be created in it
        utils.create_tenant(self.tenant, str(self.auth_token))
        utils.create_user(self.tenant, self.user, self.auth_token)
        # NOTE(review): both create_user and add_user_json are called -- presumably
        # one creates and one associates via the JSON API; confirm both are needed.
        utils.add_user_json(self.tenant, self.user, self.auth_token)
        self.token = utils.get_token(self.user, 'secrete', self.tenant,
                                     'token')
    def tearDown(self):
        # Remove the user first, then the (now empty) tenant
        utils.delete_user(self.tenant, self.user, self.auth_token)
        utils.delete_tenant(self.tenant, self.auth_token)
class CreateTenantTest(TenantTest):
    """Tests for creating tenants via POST /tenants (JSON and XML variants).

    Each test tolerates 500/503 by failing with an explicit server-side
    message, then asserts the expected HTTP status for the scenario.
    """

    def test_tenant_create(self):
        """Re-creating a removed tenant should succeed with 200/201."""
        utils.delete_user(self.tenant, self.user, self.auth_token)
        utils.delete_tenant(self.tenant, str(self.auth_token))
        resp, content = utils.create_tenant(self.tenant, str(self.auth_token))
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        if int(resp['status']) not in (200, 201):
            self.fail('Failed due to %d' % int(resp['status']))

    def test_tenant_create_xml(self):
        """XML variant of tenant creation; expect 200/201."""
        utils.delete_user(self.tenant, self.user, self.auth_token)
        utils.delete_tenant_xml(self.tenant,
                                str(self.auth_token))
        resp, content = utils.create_tenant_xml(self.tenant,
                                                str(self.auth_token))
        content = etree.fromstring(content)
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        if int(resp['status']) not in (200, 201):
            self.fail('Failed due to %d' % int(resp['status']))

    def test_tenant_create_again(self):
        """Creating an already-existing tenant must return 409 Conflict."""
        resp, content = utils.create_tenant(self.tenant,
                                            str(self.auth_token))
        if int(resp['status']) == 200:
            self.tenant = content['tenant']['id']
        resp, content = utils.create_tenant(self.tenant,
                                            str(self.auth_token))
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(409, int(resp['status']))

    def test_tenant_create_again_xml(self):
        """XML variant of the duplicate-create case; expect 409."""
        resp, content = utils.create_tenant_xml(self.tenant,
                                                str(self.auth_token))
        resp, content = utils.create_tenant_xml(self.tenant,
                                                str(self.auth_token))
        content = etree.fromstring(content)
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(409, int(resp['status']))

    def test_tenant_create_forbidden_token(self):
        """A non-admin user token must not create tenants; expect 401."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant(self.tenant, str(self.auth_token))
        if int(resp['status']) == 200:
            self.tenant = content['tenant']['id']
        url = '%stenants' % (utils.URL)
        body = {"tenant": {"id": self.tenant,
                           "description": "A description ...",
                           "enabled": True}}
        resp, content = header.request(url, "POST", body=json.dumps(body),
                                       headers={"Content-Type": "application/json",
                                                "X-Auth-Token": self.token})
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(401, int(resp['status']))

    def test_tenant_create_forbidden_token_xml(self):
        """XML variant: non-admin token creation attempt returns 401."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant_xml(self.tenant,
                                                str(self.auth_token))
        content = etree.fromstring(content)
        if int(resp['status']) == 200:
            self.tenant = content.get('id')
        url = '%stenants' % (utils.URL)
        body = '<?xml version="1.0" encoding="UTF-8"?> \
            <tenant xmlns="http://docs.openstack.org/idm/api/v1.0" \
            enabled="true" id="%s"> \
            <description>A description...</description> \
            </tenant>' % self.tenant
        resp, content = header.request(url, "POST", body=body,
                                       headers={"Content-Type": "application/xml",
                                                "X-Auth-Token": self.token,
                                                "ACCEPT": "application/xml"})
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(401, int(resp['status']))

    def test_tenant_create_expired_token(self):
        """An expired admin token must be rejected; expect 403."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant(self.tenant,
                                            str(self.auth_token))
        if int(resp['status']) == 200:
            self.tenant = content['tenant']['id']
        url = '%stenants' % (utils.URL)
        body = {"tenant": {"id": self.tenant,
                           "description": "A description ...",
                           "enabled": True}}
        resp, content = header.request(url, "POST", body=json.dumps(body),
                                       headers={"Content-Type": "application/json",
                                                "X-Auth-Token": self.exp_auth_token})
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(403, int(resp['status']))

    def test_tenant_create_expired_token_xml(self):
        """XML variant: expired token creation attempt returns 403."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant_xml(self.tenant,
                                                str(self.auth_token))
        content = etree.fromstring(content)
        if int(resp['status']) == 200:
            self.tenant = content.get('id')
        url = '%stenants' % (utils.URL)
        body = '<?xml version="1.0" encoding="UTF-8"?> \
            <tenant xmlns="http://docs.openstack.org/idm/api/v1.0" \
            enabled="true" id="%s"> \
            <description>A description...</description> \
            </tenant>' % self.tenant
        resp, content = header.request(url, "POST", body=body,
                                       headers={"Content-Type": "application/xml",
                                                "X-Auth-Token": self.exp_auth_token,
                                                "ACCEPT": "application/xml"})
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(403, int(resp['status']))

    def test_tenant_create_missing_token(self):
        """Requests without any X-Auth-Token header return 401."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant(self.tenant,
                                            str(self.auth_token))
        if int(resp['status']) == 200:
            self.tenant = content['tenant']['id']
        url = '%stenants' % (utils.URL)
        body = {"tenant": {"id": self.tenant,
                           "description": "A description ...",
                           "enabled": True}}
        resp, content = header.request(url, "POST", body=json.dumps(body),
                                       headers={"Content-Type": "application/json"})
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(401, int(resp['status']))

    def test_tenant_create_missing_token_xml(self):
        """XML variant: missing token returns 401."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant_xml(self.tenant,
                                                str(self.auth_token))
        content = etree.fromstring(content)
        if int(resp['status']) == 200:
            self.tenant = content.get('id')
        url = '%stenants' % (utils.URL)
        body = '<?xml version="1.0" encoding="UTF-8"?> \
            <tenant xmlns="http://docs.openstack.org/idm/api/v1.0" \
            enabled="true" id="%s"> \
            <description>A description...</description> \
            </tenant>' % self.tenant
        resp, content = header.request(url, "POST", body=body,
                                       headers={"Content-Type": "application/xml",
                                                "ACCEPT": "application/xml"})
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(401, int(resp['status']))

    def test_tenant_create_disabled_token(self):
        """A disabled token must be rejected; expect 403."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant(self.tenant,
                                            str(self.auth_token))
        if int(resp['status']) == 200:
            self.tenant = content['tenant']['id']
        url = '%stenants' % (utils.URL)
        body = '{"tenant": { "id": "%s", \
            "description": "A description ...", "enabled"\
            :true } }' % self.tenant
        resp, content = header.request(url, "POST", body=body,
                                       headers={"Content-Type": "application/json",
                                                "X-Auth-Token": self.disabled_token})
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(403, int(resp['status']))

    def test_tenant_create_disabled_token_xml(self):
        """XML variant: disabled token returns 403."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant_xml(self.tenant,
                                                str(self.auth_token))
        content = etree.fromstring(content)
        if int(resp['status']) == 200:
            self.tenant = content.get('id')
        url = '%stenants' % (utils.URL)
        body = '<?xml version="1.0" encoding="UTF-8"?> \
            <tenant xmlns="http://docs.openstack.org/idm/api/v1.0" \
            enabled="true" id="%s"> \
            <description>A description...</description> \
            </tenant>' % self.tenant
        resp, content = header.request(url, "POST", body=body,
                                       headers={"Content-Type": "application/xml",
                                                "X-Auth-Token": self.disabled_token,
                                                "ACCEPT": "application/xml"})
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(403, int(resp['status']))

    def test_tenant_create_invalid_token(self):
        """An unknown token must be rejected; expect 404."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant(self.tenant,
                                            str(self.auth_token))
        if int(resp['status']) == 200:
            self.tenant = content['tenant']['id']
        url = '%stenants' % (utils.URL)
        body = '{"tenant": { "id": "%s", \
            "description": "A description ...", "enabled"\
            :true } }' % self.tenant
        resp, content = header.request(url, "POST", body=body,
                                       headers={"Content-Type": "application/json",
                                                "X-Auth-Token": 'nonexsitingtoken'})
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(404, int(resp['status']))

    def test_tenant_create_invalid_token_xml(self):
        """XML variant: unknown token returns 404."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant_xml(self.tenant,
                                                str(self.auth_token))
        content = etree.fromstring(content)
        if int(resp['status']) == 200:
            self.tenant = content.get('id')
        url = '%stenants' % (utils.URL)
        body = '<?xml version="1.0" encoding="UTF-8"?> \
            <tenant xmlns="http://docs.openstack.org/idm/api/v1.0" \
            enabled="true" id="%s"> \
            <description>A description...</description> \
            </tenant>' % self.tenant
        resp, content = header.request(url, "POST", body=body,
                                       headers={"Content-Type": "application/xml",
                                                "X-Auth-Token": 'nonexsitingtoken',
                                                "ACCEPT": "application/xml"})
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(404, int(resp['status']))
class GetTenantsTest(TenantTest):
    """Tests for listing tenants via GET /tenants (JSON and XML variants)."""

    def test_get_tenants(self):
        """Listing tenants with an admin token returns 200."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant(self.tenant, str(self.auth_token))
        url = '%stenants' % (utils.URL)
        # test for Content-Type = application/json
        resp, content = header.request(url, "GET", body='{}',
                                       headers={"Content-Type": "application/json",
                                                "X-Auth-Token": self.auth_token})
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(200, int(resp['status']))

    def test_get_tenants_xml(self):
        """XML variant of the tenant listing; expect 200."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant(self.tenant, str(self.auth_token))
        url = '%stenants' % (utils.URL)
        # test for Content-Type = application/xml
        resp, content = header.request(url, "GET", body='',
                                       headers={"Content-Type": "application/xml",
                                                "X-Auth-Token": self.auth_token,
                                                "ACCEPT": "application/xml"})
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(200, int(resp['status']))

    def test_get_tenants_unauthorized_token(self):
        """A non-admin user token may not list tenants; expect 401."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant(self.tenant, str(self.auth_token))
        url = '%stenants' % (utils.URL)
        # test for Content-Type = application/json
        resp, content = header.request(url, "GET", body='{}',
                                       headers={"Content-Type": "application/json",
                                                "X-Auth-Token": self.token})
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(401, int(resp['status']))

    def test_get_tenants_unauthorized_token_xml(self):
        """XML variant: non-admin token listing attempt returns 401."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant(self.tenant, str(self.auth_token))
        url = '%stenants' % (utils.URL)
        # test for Content-Type = application/xml
        resp, content = header.request(url, "GET", body='',
                                       headers={"Content-Type": "application/xml",
                                                "X-Auth-Token": self.token,
                                                "ACCEPT": "application/xml"})
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(401, int(resp['status']))

    def test_get_tenants_exp_token(self):
        """An expired token may not list tenants; expect 403."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant(self.tenant, str(self.auth_token))
        url = '%stenants' % (utils.URL)
        # test for Content-Type = application/json
        resp, content = header.request(url, "GET", body='{}',
                                       headers={"Content-Type": "application/json",
                                                "X-Auth-Token": self.exp_auth_token})
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(403, int(resp['status']))

    def test_get_tenants_exp_token_xml(self):
        """XML variant: expired token listing attempt returns 403."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant(self.tenant, str(self.auth_token))
        url = '%stenants' % (utils.URL)
        # test for Content-Type = application/xml
        resp, content = header.request(url, "GET", body='',
                                       headers={"Content-Type": "application/xml",
                                                "X-Auth-Token": self.exp_auth_token,
                                                "ACCEPT": "application/xml"})
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(403, int(resp['status']))
class GetTenantTest(TenantTest):
    """Tests for fetching a single tenant via GET /tenants/<id>."""

    def test_get_tenant(self):
        """Fetching an existing tenant returns 200."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant(self.tenant, str(self.auth_token))
        url = '%stenants/%s' % (utils.URL, self.tenant)
        # test for Content-Type = application/json
        resp, content = header.request(url, "GET", body='{}',
                                       headers={"Content-Type": "application/json",
                                                "X-Auth-Token": self.auth_token})
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(200, int(resp['status']))

    def test_get_tenant_xml(self):
        """XML variant: fetching an existing tenant returns 200."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant(self.tenant, str(self.auth_token))
        url = '%stenants/%s' % (utils.URL, self.tenant)
        # test for Content-Type = application/xml
        resp, content = header.request(url, "GET", body='',
                                       headers={"Content-Type": "application/xml",
                                                "X-Auth-Token": self.auth_token,
                                                "ACCEPT": "application/xml"})
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(200, int(resp['status']))

    def test_get_tenant_bad(self):
        """Fetching with a malformed/unknown tenant id returns 404."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant(self.tenant, str(self.auth_token))
        url = '%stenants/%s' % (utils.URL, 'tenant_bad')
        # test for Content-Type = application/json
        resp, content = header.request(url, "GET", body='',
                                       headers={"Content-Type": "application/json",
                                                "X-Auth-Token": self.auth_token})
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(404, int(resp['status']))

    def test_get_tenant_bad_xml(self):
        """XML variant: malformed/unknown tenant id returns 404."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant(self.tenant, str(self.auth_token))
        url = '%stenants/%s' % (utils.URL, 'tenant_bad')
        # test for Content-Type = application/xml
        resp, content = header.request(url, "GET", body='',
                                       headers={"Content-Type": "application/xml",
                                                "X-Auth-Token": self.auth_token,
                                                "ACCEPT": "application/xml"})
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(404, int(resp['status']))

    def test_get_tenant_not_found(self):
        """Fetching a nonexistent tenant id returns 404."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant(self.tenant, str(self.auth_token))
        url = '%stenants/NonexistingID' % (utils.URL)
        # test for Content-Type = application/json
        resp, content = header.request(url, "GET", body='{}',
                                       headers={"Content-Type": "application/json",
                                                "X-Auth-Token": self.auth_token})
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(404, int(resp['status']))

    def test_get_tenant_not_found_xml(self):
        """XML variant: nonexistent tenant id returns 404."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant(self.tenant, str(self.auth_token))
        url = '%stenants/NonexistingID' % (utils.URL)
        # test for Content-Type = application/xml
        resp, content = header.request(url, "GET", body='',
                                       headers={"Content-Type": "application/xml",
                                                "X-Auth-Token": self.auth_token,
                                                "ACCEPT": "application/xml"})
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(404, int(resp['status']))
class UpdateTenantTest(TenantTest):
    """Tests for updating tenants via PUT /tenants/<id> (JSON and XML).

    Fix: the two *not_found* tests previously issued a GET request, so the
    update code path was never exercised (a GET on a missing id returns 404
    regardless). They now issue the intended PUT, and the XML payload is
    well-formed so the only error condition is the missing tenant.
    """

    def test_update_tenant(self):
        """A valid JSON update returns 200 with the new description."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant(self.tenant, str(self.auth_token))
        url = '%stenants/%s' % (utils.URL, self.tenant)
        data = '{"tenant": { "description": "A NEW description..." ,\
            "enabled":true }}'
        # test for Content-Type = application/json
        resp, content = header.request(url, "PUT", body=data,
                                       headers={"Content-Type": "application/json",
                                                "X-Auth-Token": self.auth_token})
        body = json.loads(content)
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(200, int(resp['status']))
        self.assertEqual(self.tenant, body['tenant']['id'])
        self.assertEqual('A NEW description...', body['tenant']['description'])

    def test_update_tenant_xml(self):
        """XML variant: a valid update returns 200 with the new description."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant_xml(self.tenant,
                                                str(self.auth_token))
        url = '%stenants/%s' % (utils.URL, self.tenant)
        data = '<?xml version="1.0" encoding="UTF-8"?> \
            <tenant xmlns="http://docs.openstack.org/idm/api/v1.0" \
            enabled="true"> \
            <description>A NEW description...</description> \
            </tenant>'
        # test for Content-Type = application/xml
        resp, content = header.request(url, "PUT", body=data,
                                       headers={"Content-Type": "application/xml",
                                                "X-Auth-Token": self.auth_token,
                                                "ACCEPT": "application/xml"})
        body = etree.fromstring(content)
        desc = body.find("{http://docs.openstack.org/idm/api/v1.0}description")
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(200, int(resp['status']))
        self.assertEqual(self.tenant, body.get('id'))
        self.assertEqual('A NEW description...', desc.text)

    def test_update_tenant_bad(self):
        """A JSON payload with an unknown field returns 400."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant(self.tenant, str(self.auth_token))
        url = '%stenants/%s' % (utils.URL, self.tenant)
        # "description_bad" is a deliberately invalid field name.
        data = '{"tenant": { "description_bad": "A NEW description...",\
            "enabled":true }}'
        # test for Content-Type = application/json
        resp, content = header.request(url, "PUT", body=data,
                                       headers={"Content-Type": "application/json",
                                                "X-Auth-Token": self.auth_token})
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(400, int(resp['status']))

    def test_update_tenant_bad_xml(self):
        """XML variant: a deliberately invalid payload returns 400."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant(self.tenant, str(self.auth_token))
        url = '%stenants/%s' % (utils.URL, self.tenant)
        # The mismatched <description_bad>...</description> tags are the
        # intentionally-broken input for this test.
        data = '<?xml version="1.0" encoding="UTF-8"?> \
            <tenant xmlns="http://docs.openstack.org/idm/api/v1.0" \
            enabled="true"> \
            <description_bad>A NEW description...</description> \
            </tenant>'
        # test for Content-Type = application/xml
        resp, content = header.request(url, "PUT", body=data,
                                       headers={"Content-Type": "application/xml",
                                                "X-Auth-Token": self.auth_token,
                                                "ACCEPT": "application/xml"})
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(400, int(resp['status']))

    def test_update_tenant_not_found(self):
        """Updating a nonexistent tenant returns 404."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant(self.tenant, str(self.auth_token))
        url = '%stenants/NonexistingID' % (utils.URL)
        data = '{"tenant": { "description": "A NEW description...",\
            "enabled":true }}'
        # test for Content-Type = application/json
        # Was "GET" before; a PUT is required to exercise the update path.
        resp, content = header.request(url, "PUT", body=data,
                                       headers={"Content-Type": "application/json",
                                                "X-Auth-Token": self.auth_token})
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(404, int(resp['status']))

    def test_update_tenant_not_found_xml(self):
        """XML variant: updating a nonexistent tenant returns 404."""
        header = httplib2.Http(".cache")
        resp, content = utils.create_tenant(self.tenant, str(self.auth_token))
        url = '%stenants/NonexistingID' % (utils.URL)
        # Valid payload (was malformed before) so the only error is the
        # missing tenant, not a 400 from bad XML.
        data = '<?xml version="1.0" encoding="UTF-8"?> \
            <tenant xmlns="http://docs.openstack.org/idm/api/v1.0" \
            enabled="true"> \
            <description>A NEW description...</description> \
            </tenant>'
        # test for Content-Type = application/xml
        # Was "GET" before; a PUT is required to exercise the update path.
        resp, content = header.request(url, "PUT", body=data,
                                       headers={"Content-Type": "application/xml",
                                                "X-Auth-Token": self.auth_token,
                                                "ACCEPT": "application/xml"})
        if int(resp['status']) == 500:
            self.fail('IDM fault')
        elif int(resp['status']) == 503:
            self.fail('Service Not Available')
        self.assertEqual(404, int(resp['status']))
class DeleteTenantTest(TenantTest):
    """Tests for DELETE /tenants/<id> (JSON and XML variants).

    Cleanup: removed stale commented-out setup code that duplicated
    what the tests already do.
    """

    def test_delete_tenant_not_found(self):
        """Deleting a tenant that does not exist returns 404."""
        resp = utils.delete_tenant("test_tenant_delete111",
                                   str(self.auth_token))
        self.assertEqual(404, int(resp['status']))

    def test_delete_tenant_not_found_xml(self):
        """XML variant: deleting a nonexistent tenant returns 404."""
        resp = utils.delete_tenant_xml("test_tenant_delete111",
                                       str(self.auth_token))
        self.assertEqual(404, int(resp['status']))

    def test_delete_tenant(self):
        """Deleting an existing tenant returns 204 No Content."""
        resp, content = utils.create_tenant("test_tenant_delete",
                                            str(self.auth_token))
        resp = utils.delete_tenant("test_tenant_delete",
                                   str(self.auth_token))
        self.assertEqual(204, int(resp['status']))

    def test_delete_tenant_xml(self):
        """XML variant: deleting an existing tenant returns 204."""
        resp, content = utils.create_tenant_xml("test_tenant_delete",
                                                str(self.auth_token))
        resp = utils.delete_tenant_xml("test_tenant_delete",
                                       str(self.auth_token))
        self.assertEqual(204, int(resp['status']))
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| |
""" Plugin Loading & Management.
"""
from __future__ import absolute_import, division, unicode_literals
import logging
import os
import re
import sys
import time
import warnings
from itertools import ifilter
from path import path
from requests import RequestException
from flexget import plugins as plugins_pkg
from flexget import config_schema
from flexget.event import add_event_handler as add_phase_handler
from flexget.event import fire_event, remove_event_handlers
# Module-level logger shared by the plugin machinery below.
log = logging.getLogger('plugin')

# Public API of this module.
__all__ = ['PluginWarning', 'PluginError', 'register_plugin', 'register_parser_option', 'register_task_phase',
           'get_plugin_by_name', 'get_plugins_by_group', 'get_plugin_keywords', 'get_plugins_by_phase',
           'get_phases_by_plugin', 'internet', 'priority']
class DependencyError(Exception):
    """Plugin depends on other plugin, but it cannot be loaded.

    Args:
        issued_by: name of the plugin trying to do the import
        missing: name of the plugin or library that is missing
        message: user readable error message

    All args are optional.
    """

    def __init__(self, issued_by=None, missing=None, message=None, silent=False):
        super(DependencyError, self).__init__()
        self.issued_by = issued_by
        self.missing = missing
        self._message = message
        self.silent = silent

    def has_message(self):
        # True once an explicit message has been supplied (even later via the setter).
        return self._message is not None

    @property
    def message(self):
        # Prefer the explicit message; otherwise synthesize one from the
        # issuing plugin and the missing dependency.
        if self._message:
            return self._message
        return 'Plugin `%s` requires dependency `%s`' % (self.issued_by, self.missing)

    @message.setter
    def message(self, message):
        self._message = message

    def __str__(self):
        return '<DependencyError(issued_by=%r,missing=%r,message=%r,silent=%r)>' % \
               (self.issued_by, self.missing, self.message, self.silent)
class RegisterException(Exception):
    """Raised when plugin or phase registration receives invalid arguments."""

    def __init__(self, value):
        super(RegisterException, self).__init__()
        # Keep the raw value around; __str__ renders its repr.
        self.value = value

    def __str__(self):
        return '%r' % (self.value,)
class PluginWarning(Warning):
    """Warning raised from within a plugin; carries the message and issuing logger."""

    def __init__(self, value, logger=log, **kwargs):
        super(PluginWarning, self).__init__()
        self.value = value  # human-readable warning text (expected unicode)
        self.log = logger
        self.kwargs = kwargs

    def __str__(self):
        # Python 2 idiom: the byte-string form is the UTF-8 encoding of the unicode form.
        return unicode(self).encode('utf-8')

    def __unicode__(self):
        return self.value
class PluginError(Exception):
    """Error raised from within a plugin; the task is aborted when one escapes a phase."""

    def __init__(self, value, logger=log, **kwargs):
        super(PluginError, self).__init__()
        # Value is expected to be a string
        if not isinstance(value, basestring):
            value = unicode(value)
        self.value = value
        self.log = logger
        self.kwargs = kwargs

    def __str__(self):
        # Python 2 idiom: the byte-string form is the UTF-8 encoding of the unicode form.
        return unicode(self).encode('utf-8')

    def __unicode__(self):
        return unicode(self.value)
# TODO: move to utils or somewhere more appropriate
class internet(object):
    """@internet decorator for plugin phase methods.

    Catches all internet related exceptions and raises PluginError with relevant message.
    Task handles PluginErrors by aborting the task.
    """

    def __init__(self, logger=None):
        # Optional logger is attached to the PluginErrors this decorator raises.
        if logger:
            self.log = logger
        else:
            self.log = logging.getLogger('@internet')

    def __call__(self, func):
        def wrapped_func(*args, **kwargs):
            # Python 2 modules; imported lazily so the decorator itself has no
            # import-time dependency on them.
            from httplib import BadStatusLine
            import urllib2
            try:
                return func(*args, **kwargs)
            except RequestException as e:
                log.debug('decorator caught RequestException. handled traceback:', exc_info=True)
                raise PluginError('RequestException: %s' % e)
            except urllib2.HTTPError as e:
                # NOTE: HTTPError is a subclass of URLError, so it must be caught first.
                raise PluginError('HTTPError %s' % e.code, self.log)
            except urllib2.URLError as e:
                log.debug('decorator caught urlerror. handled traceback:', exc_info=True)
                raise PluginError('URLError %s' % e.reason, self.log)
            except BadStatusLine:
                log.debug('decorator caught badstatusline. handled traceback:', exc_info=True)
                raise PluginError('Got BadStatusLine', self.log)
            except ValueError as e:
                log.debug('decorator caught ValueError. handled traceback:', exc_info=True)
                raise PluginError(e)
            except IOError as e:
                log.debug('decorator caught ioerror. handled traceback:', exc_info=True)
                # Distinguish connection failures from server-side errors when possible.
                if hasattr(e, 'reason'):
                    raise PluginError('Failed to reach server. Reason: %s' % e.reason, self.log)
                elif hasattr(e, 'code'):
                    raise PluginError('The server couldn\'t fulfill the request. Error code: %s' % e.code, self.log)
                raise PluginError('IOError when connecting to server: %s' % e, self.log)
        return wrapped_func
def priority(value):
    """Return a decorator that marks a phase handler with the given priority.

    The value is stored as a ``priority`` attribute on the decorated
    callable and is picked up when phase handlers are registered.
    """
    def mark(target):
        target.priority = value
        return target
    return mark
# Priority used for phase handlers that do not declare one via @priority.
DEFAULT_PRIORITY = 128

# Configuration contexts a plugin may declare itself usable in.
plugin_contexts = ['task', 'root']

# task phases, in order of their execution; note that this can be extended by
# registering new phases at runtime
task_phases = ['start', 'input', 'metainfo', 'filter', 'download', 'modify', 'output', 'learn', 'exit']

# map phase names to method names
phase_methods = {
    # task
    'abort': 'on_task_abort'  # special; not a task phase that gets called normally
}
phase_methods.update((_phase, 'on_task_' + _phase) for _phase in task_phases)  # DRY

# Mapping of plugin name to PluginInfo instance (logical singletons)
plugins = {}

# Loading done?
plugins_loaded = False

# Internal bookkeeping: imported plugin modules, parser options registered by
# plugins, and phases whose before/after anchor is not yet available.
_loaded_plugins = {}
_plugin_options = []
_new_phase_queue = {}
def register_task_phase(name, before=None, after=None):
    """Add a new task phase to the available phases.

    :param string name: Name of the new phase.
    :param string before: Existing phase this one must run directly before.
    :param string after: Existing phase this one must run directly after.
    :raises RegisterException: if both/neither anchors are given or the
        phase already exists.

    Exactly one of *before*/*after* must be supplied. If the anchor phase
    is not registered yet, the request is queued and retried whenever
    another phase is successfully added.
    """
    if before and after:
        raise RegisterException('You can only give either before or after for a phase.')
    if not before and not after:
        raise RegisterException('You must specify either a before or after phase.')
    if name in task_phases or name in _new_phase_queue:
        raise RegisterException('Phase %s already exists.' % name)

    def add_phase(phase_name, before, after):
        # Anchor phase not registered yet -> caller queues the request.
        if before is not None and before not in task_phases:
            return False
        if after is not None and after not in task_phases:
            return False
        # add method name to phase -> method lookup table
        phase_methods[phase_name] = 'on_task_' + phase_name
        # place phase in phase list relative to its anchor
        if before is None:
            task_phases.insert(task_phases.index(after) + 1, phase_name)
        if after is None:
            task_phases.insert(task_phases.index(before), phase_name)
        return True

    # if it can't be added yet (anchor missing) queue the addition
    if not add_phase(name, before, after):
        _new_phase_queue[name] = [before, after]

    # Retry queued phases; iterate over a copy because we delete entries
    # while looping (also keeps this safe on Python 3's dict views).
    for phase_name, args in list(_new_phase_queue.items()):
        if add_phase(phase_name, *args):
            del _new_phase_queue[phase_name]
class PluginInfo(dict):
    """
    Allows accessing key/value pairs of this dictionary subclass via
    attributes. Also instantiates a plugin and initializes properties.
    """
    # Counts duplicate registrations
    dupe_counter = 0

    def __init__(self, plugin_class, name=None, groups=None, builtin=False, debug=False, api_ver=1,
                 contexts=None, category=None):
        """
        Register a plugin.

        :param plugin_class: The plugin factory.
        :param string name: Name of the plugin (if not given, default to factory class name in underscore form).
        :param list groups: Groups this plugin belongs to.
        :param bool builtin: Auto-activated?
        :param bool debug: True if plugin is for debugging purposes.
        :param int api_ver: Signature of callback hooks (1=task; 2=task,config).
        :param list contexts: List of where this plugin is configurable. Can be 'task', 'root', or None
        :param string category: The type of plugin. Can be one of the task phases.
            Defaults to the package name containing the plugin.
        """
        dict.__init__(self)
        if groups is None:
            groups = []
        if name is None:
            # Convention is to take camel-case class name and rewrite it to an underscore form,
            # e.g. 'PluginName' to 'plugin_name'
            name = re.sub('[A-Z]+', lambda i: '_' + i.group(0).lower(), plugin_class.__name__).lstrip('_')
        if contexts is None:
            contexts = ['task']
        elif isinstance(contexts, basestring):
            contexts = [contexts]
        if category is None and plugin_class.__module__.startswith('flexget.plugins'):
            # By default look at the containing package of the plugin.
            category = plugin_class.__module__.split('.')[-2]
        # Check for unsupported api versions
        if api_ver < 2:
            warnings.warn('Api versions <2 are no longer supported. Plugin %s' % name, DeprecationWarning, stacklevel=2)
        # Set basic info attributes.
        # NOTE: attribute assignment goes through __setattr__ below, so each of
        # these is stored as a dictionary item as well as being attribute-readable.
        self.api_ver = api_ver
        self.name = name
        self.groups = groups
        self.builtin = builtin
        self.debug = debug
        self.contexts = contexts
        self.category = category
        self.phase_handlers = {}
        self.plugin_class = plugin_class
        self.instance = None
        if self.name in plugins:
            PluginInfo.dupe_counter += 1
            log.critical('Error while registering plugin %s. A plugin with the same name is already registered' %
                         self.name)
        else:
            plugins[self.name] = self

    def initialize(self):
        # Lazily instantiate the plugin class on first use; safe to call repeatedly.
        if self.instance is not None:
            # We already initialized
            return
        # Create plugin instance
        self.instance = self.plugin_class()
        self.instance.plugin_info = self  # give plugin easy access to its own info
        self.instance.log = logging.getLogger(getattr(self.instance, "LOGGER_NAME", None) or self.name)
        if hasattr(self.instance, 'schema'):
            self.schema = self.instance.schema
        elif hasattr(self.instance, 'validator'):
            # Legacy validator API: derive a schema from it.
            self.schema = self.instance.validator().schema()
        else:
            # TODO: I think plugins without schemas should not be allowed in config, maybe rethink this
            self.schema = {}
        if self.schema is not None:
            # Publish the schema under a well-known location for config validation.
            location = '/schema/plugin/%s' % self.name
            self.schema['id'] = location
            config_schema.register_schema(location, self.schema)
        self.build_phase_handlers()

    def reset_phase_handlers(self):
        """Temporary utility method"""
        self.phase_handlers = {}
        self.build_phase_handlers()
        # TODO: should unregister events (from flexget.event)
        # this method is not used at the moment anywhere ...
        raise NotImplementedError

    def build_phase_handlers(self):
        """(Re)build phase_handlers in this plugin"""
        for phase, method_name in phase_methods.iteritems():
            if phase in self.phase_handlers:
                continue
            if hasattr(self.instance, method_name):
                method = getattr(self.instance, method_name)
                if not callable(method):
                    continue
                # check for priority decorator
                if hasattr(method, 'priority'):
                    handler_prio = method.priority
                else:
                    handler_prio = DEFAULT_PRIORITY
                event = add_phase_handler('plugin.%s.%s' % (self.name, phase), method, handler_prio)
                # provides backwards compatibility
                event.plugin = self
                self.phase_handlers[phase] = event

    def __getattr__(self, attr):
        # Attribute access falls back to dictionary lookup.
        if attr in self:
            return self[attr]
        return dict.__getattribute__(self, attr)

    def __setattr__(self, attr, value):
        # Every attribute write is stored as a dictionary item.
        self[attr] = value

    def __str__(self):
        return '<PluginInfo(name=%s)>' % self.name

    __repr__ = __str__


register = PluginInfo  # conventional alias used by plugin modules
def _strip_trailing_sep(path):
return path.rstrip("\\/")
def _get_standard_plugins_path():
    """
    :returns: List of directories where plugins should be tried to load from.
    """
    # User-specified directories come first, taken from the environment.
    env_path = os.environ.get('FLEXGET_PLUGIN_PATH')
    paths = env_path.split(os.pathsep) if env_path else []
    # The bundled flexget.plugins package (core plugins) is always searched last.
    paths.append(os.path.abspath(os.path.dirname(plugins_pkg.__file__)))
    return paths
def _load_plugins_from_dirs(dirs):
    """
    Import every ``*.py`` file found under the given directories as a
    submodule of ``plugins_pkg``, logging (and mostly swallowing) the
    various ways a plugin module can fail to import.

    :param list dirs: Directories from where plugins are loaded from
    """
    log.debug('Trying to load plugins from: %s' % dirs)
    # ``path`` here is the third-party path.py class; non-existent dirs are
    # silently dropped.
    dirs = [path(d) for d in dirs if os.path.isdir(d)]
    # add all dirs to plugins_pkg load path so that imports work properly from any of the plugin dirs
    # NOTE(review): ``map`` returns a list on python 2 only; on python 3 this
    # would assign a map object to __path__ -- confirm before porting.
    plugins_pkg.__path__ = map(_strip_trailing_sep, dirs)
    for plugins_dir in dirs:
        for plugin_path in plugins_dir.walkfiles('*.py'):
            if plugin_path.name == '__init__.py':
                continue
            # Split the relative path from the plugins dir to current file's parent dir to find subpackage names
            plugin_subpackages = filter(None, plugin_path.relpath(plugins_dir).parent.splitall())
            module_name = '.'.join([plugins_pkg.__name__] + plugin_subpackages + [plugin_path.namebase])
            try:
                __import__(module_name)
            except DependencyError as e:
                # Missing dependency: log and move on, honouring the
                # exception's "silent" flag for the log level.
                if e.has_message():
                    msg = e.message
                else:
                    msg = 'Plugin `%s` requires `%s` to load.' % (e.issued_by or module_name, e.missing or 'N/A')
                if not e.silent:
                    log.warning(msg)
                else:
                    log.debug(msg)
            except ImportError as e:
                log.critical('Plugin `%s` failed to import dependencies' % module_name)
                log.exception(e)
            except ValueError as e:
                # Debugging #2755
                log.error('ValueError attempting to import `%s` (from %s): %s', module_name, plugin_path, e)
            except Exception as e:
                # Anything else is a genuine bug in the plugin: log and re-raise.
                log.critical('Exception while loading plugin %s' % module_name)
                log.exception(e)
                raise
            else:
                log.trace('Loaded module %s from %s' % (module_name, plugin_path))
    # Phases requested via register_task_phase() that could not be anchored
    # relative to an existing phase are reported here.
    if _new_phase_queue:
        for phase, args in _new_phase_queue.iteritems():
            log.error('Plugin %s requested new phase %s, but it could not be created at requested '
                      'point (before, after). Plugin is not working properly.' % (args[0], phase))
def load_plugins(extra_dirs=None):
    """
    Load plugins from the standard plugin paths.

    :param list extra_dirs: Extra directories from where plugins are loaded.
    """
    global plugins_loaded
    if not extra_dirs:
        extra_dirs = []
    # The standard locations (including the bundled flexget.plugins package)
    # are always searched in addition to any caller-supplied directories.
    extra_dirs.extend(_get_standard_plugins_path())
    started = time.time()
    # Importing the modules triggers their module-level registration code ...
    _load_plugins_from_dirs(extra_dirs)
    # ... and this event gives them the chance to register PluginInfo entries.
    fire_event('plugin.register')
    # Registration is strictly one-shot; drop the handlers so it cannot rerun.
    remove_event_handlers('plugin.register')
    # With everything registered, let each plugin initialize itself.
    for plugin in plugins.values():
        plugin.initialize()
    elapsed = time.time() - started
    plugins_loaded = True
    log.debug('Plugins took %.2f seconds to load' % elapsed)
def get_plugins(phase=None, group=None, context=None, category=None, name=None, min_api=None):
    """
    Query other plugins characteristics.

    :param string phase: Require phase
    :param string group: Plugin must belong to this group.
    :param string context: Where plugin is configured, eg. (root, task)
    :param string context: Where plugin is configured, eg. (root, task)
    :param string category: Type of plugin, phase names.
    :param string name: Name of the plugin.
    :param int min_api: Minimum api version.
    :return: Lazy iterator over the matching PluginInfo instances
        (note: not a list -- ``ifilter`` is evaluated on demand).
    :rtype: iterator
    """
    def matches(plugin):
        # An unknown phase is a programming error and raises instead of
        # silently matching nothing.
        if phase is not None and phase not in phase_methods:
            raise ValueError('Unknown phase %s' % phase)
        if phase and not phase in plugin.phase_handlers:
            return False
        if group and not group in plugin.groups:
            return False
        if context and not context in plugin.contexts:
            return False
        if category and not category == plugin.category:
            return False
        if name is not None and name != plugin.name:
            return False
        if min_api is not None and plugin.api_ver < min_api:
            return False
        return True
    return ifilter(matches, plugins.itervalues())
def plugin_schemas(**kwargs):
    """Create a dict schema that matches plugins specified by `kwargs`"""
    # One `$ref` property per matching plugin, keyed by plugin name.
    properties = {p.name: {'$ref': p.schema['id']} for p in get_plugins(**kwargs)}
    return {'type': 'object',
            'properties': properties,
            'additionalProperties': False,
            'error_additionalProperties': '{{message}} Only known plugin names are valid keys.',
            'patternProperties': {'^_': {'title': 'Disabled Plugin'}}}
# Expose the combined "any plugin" schema at a well-known location.
config_schema.register_schema('/schema/plugins', plugin_schemas)
def get_plugins_by_phase(phase):
    """
    .. deprecated:: 1.0.3328
       Use :func:`get_plugins` instead

    Return an iterator over all plugins that hook :phase:
    """
    # stacklevel=2 points the warning at the caller, not at this shim.
    warnings.warn('Deprecated API', DeprecationWarning, stacklevel=2)
    if phase not in phase_methods:
        raise Exception('Unknown phase %s' % phase)
    return get_plugins(phase=phase)
def get_phases_by_plugin(name):
    """Return all phases plugin :name: hooks"""
    # phase_handlers is a dict keyed by phase name; listing it yields the keys.
    plugin = get_plugin_by_name(name)
    return list(plugin.phase_handlers)
def get_plugins_by_group(group):
    """
    .. deprecated:: 1.0.3328
       Use :func:`get_plugins` instead

    Return an iterator over all plugins in the specified group.
    """
    # stacklevel=2 points the warning at the caller, not at this shim.
    warnings.warn('Deprecated API', DeprecationWarning, stacklevel=2)
    return get_plugins(group=group)
def get_plugin_keywords():
    """Return iterator over all plugin keywords."""
    # ``iter(plugins)`` iterates the dict keys on both python 2 and 3 and is
    # equivalent to the python2-only ``plugins.iterkeys()``.
    return iter(plugins)
def get_plugin_by_name(name, issued_by='???'):
    """Get plugin by name, preferred way since this structure may be changed at some point.

    :raises DependencyError: if no plugin called ``name`` is registered.
    """
    # ``name not in`` is the idiomatic spelling of the membership test.
    if name not in plugins:
        raise DependencyError(issued_by=issued_by, missing=name, message='Unknown plugin %s' % name)
    return plugins[name]
| |
import json
import django_filters
from atom.ext.crispy_forms.views import FormSetMixin
from atom.ext.django_filters.filters import CrispyFilterMixin
from braces.views import (
PrefetchRelatedMixin,
SelectRelatedMixin,
SetHeadlineMixin,
UserFormKwargsMixin,
)
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.core.exceptions import PermissionDenied
from django.core.files.base import File
from django.http import HttpResponseBadRequest, HttpResponseRedirect, JsonResponse
from django.utils.translation import ugettext_lazy as _
from django.views import View
from django.views.generic import CreateView, UpdateView
from django_filters.views import FilterView
from poradnia.cases.models import Case
from poradnia.letters.settings import LETTER_RECEIVE_SECRET
from poradnia.template_mail.utils import TemplateKey, TemplateMailManager
from poradnia.users.utils import PermissionMixin
from ..forms import AttachmentForm, LetterForm, NewCaseForm
from ..models import Attachment, Letter
from .fbv import REGISTRATION_TEXT
class NewCaseCreateView(
    SetHeadlineMixin, FormSetMixin, UserFormKwargsMixin, CreateView
):
    """Create a new letter together with its case and optional attachments."""

    model = Letter
    form_class = NewCaseForm
    headline = _("Create a new case")
    template_name = "letters/form_new.html"
    inline_model = Attachment
    inline_form_cls = AttachmentForm

    def formset_valid(self, form, formset, *args, **kwargs):
        # Persist the attachment formset, then notify the client about the
        # freshly registered case.
        formset.save()
        success_msg = _("Case about {object} created!").format(object=self.object.name)
        messages.success(self.request, success_msg)
        case = self.object.case
        self.object.client.notify(
            actor=self.object.created_by,
            verb="registered",
            target=case,
            from_email=case.get_email(),
        )
        # Anonymous submitters get an account created for them on the fly;
        # tell them about it.
        if self.request.user.is_anonymous:
            messages.success(
                self.request, _(REGISTRATION_TEXT) % {"user": self.object.created_by}
            )
        return HttpResponseRedirect(case.get_absolute_url())
class LetterUpdateView(SetHeadlineMixin, FormSetMixin, UserFormKwargsMixin, UpdateView):
    """Edit an existing letter (and its attachment formset) within a case."""

    model = Letter
    form_class = LetterForm
    headline = _("Edit")
    template_name = "letters/form_edit.html"
    inline_model = Attachment
    inline_form_cls = AttachmentForm

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["case"] = self.object.case
        return context

    def get_instance(self):
        return self.object

    def get_object(self):
        # Authors may edit their own letters with the weaker permission;
        # touching somebody else's letter requires the "all" permission.
        obj = super().get_object()
        if obj.created_by_id == self.request.user.pk:
            obj.case.perm_check(self.request.user, "can_change_own_record")
        else:
            obj.case.perm_check(self.request.user, "can_change_all_record")
        return obj

    def get_formset_valid_message(self):
        # Fix: mark the message for translation -- the original string was
        # never passed through ``_()``, unlike every other user-facing
        # message in this module.
        return _("Letter %(object)s updated!") % {"object": self.object}

    def get_success_url(self):
        return self.object.case.get_absolute_url()

    def formset_valid(self, form, formset):
        resp = super().formset_valid(form, formset)
        self.object.send_notification(actor=self.request.user, verb="updated")
        return resp
class StaffLetterFilter(CrispyFilterMixin, django_filters.FilterSet):
    """Letter filters available to staff users (filter by case status)."""

    status = django_filters.MultipleChoiceFilter(
        label=_("Status"),
        # null_label=_("Any"),
        choices=[("", "---------")] + Case.STATUS,
    )

    # NOTE: the previous no-op ``__init__`` (which only delegated to
    # ``super().__init__``) has been removed -- it was dead code.

    class Meta:
        model = Letter
        fields = ["status"]
class UserLetterFilter(CrispyFilterMixin, django_filters.FilterSet):
    """Letter filter for regular users: crispy layout only, no filter fields."""

    class Meta:
        model = Letter
        fields = []
class LetterListView(
    PermissionMixin, SelectRelatedMixin, PrefetchRelatedMixin, FilterView
):
    """Paginated letter list; staff get a richer filter set than users."""

    model = Letter
    paginate_by = 20
    select_related = ["created_by", "modified_by", "case"]
    prefetch_related = ["attachment_set"]

    @property
    def filterset_class(self):
        # The filter class is chosen per request, based on the user's role.
        if self.request.user.is_staff:
            return StaffLetterFilter
        return UserLetterFilter
class ReceiveEmailView(View):
    """Webhook endpoint ingesting incoming e-mail (JSON manifest + raw EML).

    The mail gateway POSTs a multipart request containing a ``manifest``
    (parsed headers/body) and the original ``eml`` file, authenticated by a
    shared secret in the query string.
    """

    required_content_type = "multipart/form-data"

    def is_allowed_recipient(self, manifest):
        # Accept mail addressed either to a whitelisted address or to any
        # address within the current site's domain.
        domain = Site.objects.get_current().domain
        return any(
            addr in x or domain in x
            for x in manifest["headers"]["to"]
            for addr in settings.LETTER_RECEIVE_WHITELISTED_ADDRESS
        )

    def is_autoreply(self, manifest):
        return manifest["headers"].get("auto_reply_type", False)

    def create_user(self, manifest):
        # Map the sender address onto an existing user, creating one if needed.
        return get_user_model().objects.get_by_email_or_create(
            manifest["headers"]["from"][0]
        )

    def create_case(self, manifest, actor):
        return self.get_case(
            subject=manifest["headers"]["subject"],
            addresses=manifest["headers"]["to+"],
            actor=actor,
        )

    def refuse_letter(self, manifest):
        # Bounce a templated refusal back to the original sender.
        context = {
            "to": manifest["headers"]["to"],
            "subject": manifest["headers"]["subject"],
        }
        TemplateMailManager.send(
            TemplateKey.LETTER_REFUSED,
            recipient_list=manifest["headers"]["from"],
            context=context,
        )

    def create_letter(self, request, actor, case, manifest):
        """Create the Letter plus one Attachment per uploaded file."""
        letter = Letter.objects.create(
            name=manifest["headers"]["subject"],
            created_by=actor,
            created_by_is_staff=actor.is_staff,
            case=case,
            genre=Letter.GENRE.mail,
            status=self.get_letter_status(actor=actor, case=case),
            text=manifest["text"]["content"],
            html="",
            signature=manifest["text"]["quote"],
            eml=File(self.request.FILES["eml"]),
        )
        for attachment in request.FILES.getlist("attachment"):
            Attachment.objects.create(letter=letter, attachment=File(attachment))
        return letter

    def post(self, request):
        # Shared-secret check guards the endpoint against outside callers.
        if request.GET.get("secret") != LETTER_RECEIVE_SECRET:
            raise PermissionDenied
        if request.content_type != self.required_content_type:
            return HttpResponseBadRequest(
                "The request has an invalid format. "
                'The acceptable format is "{}"'.format(self.required_content_type)
            )
        # Typo fixes below: the previous messages said "filed" for "field".
        if "manifest" not in request.FILES:
            return HttpResponseBadRequest(
                "The request has an invalid format. Missing 'manifest' field."
            )
        if "eml" not in request.FILES:
            return HttpResponseBadRequest(
                "The request has an invalid format. Missing 'eml' field."
            )
        manifest = json.load(request.FILES["manifest"])
        REFUSE_MESSAGE = (
            "There is no e-mail address for the target system in the recipient field. "
        )
        if not self.is_allowed_recipient(manifest):
            # Never answer auto-replies -- that would risk a mail loop.
            if not self.is_autoreply(manifest):
                self.refuse_letter(manifest)
                return HttpResponseBadRequest(
                    REFUSE_MESSAGE + "Notification has been sent."
                )
            return HttpResponseBadRequest(
                REFUSE_MESSAGE + "Notification has been skipped."
            )
        actor = self.create_user(manifest)
        case = self.create_case(manifest, actor)
        letter = self.create_letter(request, actor, case, manifest)
        # A staff "done" letter arriving on a closed case re-opens it.
        if case.status == Case.STATUS.closed and letter.status == Letter.STATUS.done:
            case.update_status(reopen=True, save=False)
        case.handled = actor.is_staff is True and letter.status == Letter.STATUS.done
        case.update_counters()
        case.save()
        letter.send_notification(actor=actor, verb="created")
        return JsonResponse({"status": "OK", "letter": letter.pk})

    def get_case(self, subject, addresses, actor):
        # Route to an existing case matched by recipient address; otherwise
        # open a new case with the sender as both creator and client.
        try:
            case = Case.objects.by_addresses(addresses).get()
        except Case.DoesNotExist:
            case = Case.objects.create(name=subject, created_by=actor, client=actor)
            actor.notify(
                actor=actor, verb="registered", target=case, from_email=case.get_email()
            )
        return case

    def get_letter_status(self, actor, case):
        # Staff letters stay internal until the author is allowed to send
        # directly to the client.
        if actor.is_staff and not actor.has_perm("cases.can_send_to_client", case):
            return Letter.STATUS.staff
        else:
            return Letter.STATUS.done
| |
from rest_framework import status
from rest_framework.generics import GenericAPIView
from rest_framework.authtoken.models import Token
from rest_framework.parsers import FormParser, MultiPartParser
from django.db.models import F
from django.db import IntegrityError
from silk.profiling.profiler import silk_profile
from jamjar.base.views import BaseView, authenticate
from jamjar.videos.models import Video, Edge, VideoFlag, VideoVote, JamPick
from jamjar.videos.serializers import (VideoSerializer,
ExpandedVideoSerializer,
EdgeSerializer,
JamJarVideoSerializer,
VideoFlagSerializer,
VideoVoteSerializer
)
from jamjar.concerts.serializers import ConcertSerializer
from jamjar.concerts.models import Concert
from jamjar.tasks.transcode_video import transcode_video
import re, datetime
class VideoListView(BaseView):
    # List and upload endpoint for videos.  NOTE: this module is python 2
    # (see the ``except Exception, e`` below); several idioms rely on that.
    parser_classes = (MultiPartParser,)
    serializer_class = VideoSerializer
    """
    Description:
        Get a list of all Videos in JamJar filtered by the following attributes:
          - genres (id)
          - artists (id)
          - uploaders (id)
          - venues (id)
        A "hot" attribute can also be supplied in order to get the hot videos
        as a mix of both view count and time (and soon votes)
        (pass a 1 or 0)
        You may pass multiple of each filter, separated with a "+".
        These filters are accepted as query parameters in the GET URL, and are ANDed together.
    Request:
        GET /videos/?genres=1+3+6&artists=4+6&top=1
    Response:
        A list of all Videos meeting the criteria
    """
    @authenticate
    def get(self, request):
        # Get our inital queryset of ALL videos (this could be big!)
        queryset = Video.objects.for_user(request.user).all()
        # Get all the possible filters and split them, making sure we get an
        # empty list if the parameter wasn't passed
        # (Django turns pluses into spaces)
        # NOTE: python 2 ``filter`` returns a list here, so the truthiness
        # checks below behave; on python 3 a filter object is always truthy.
        genre_filters = filter(None, request.GET.get('genres', '').split(' '))
        artist_filters = filter(None, request.GET.get('artists', '').split(' '))
        uploader_filters = filter(None, request.GET.get('uploaders', '').split(' '))
        venues_filters = filter(None, request.GET.get('venues', '').split(' '))
        if genre_filters:
            queryset = queryset.filter(artists__genres__in=genre_filters)
        if artist_filters:
            queryset = queryset.filter(artists__in=artist_filters)
        if uploader_filters:
            queryset = queryset.filter(user__in=uploader_filters)
        if venues_filters:
            queryset = queryset.filter(venue__in=venues_filters)
        # Optimize queries
        queryset = queryset.prefetch_related('artists',
                                             'artists__genres',
                                             'artists__images',
                                             'votes',
                                             'concert__artists',
                                             'concert__artists__genres',
                                             'concert__artists__images').select_related(
                                             'user',
                                             'concert',
                                             'concert__venue')
        # Limit this until we make this shit better
        queryset = queryset[:50]
        hot = int(request.GET.get('hot', 0))
        if hot:
            # If "hot" is true, order by hotness
            # NOTE(review): calling order_by() on an already-sliced queryset
            # raises in Django ("Cannot reorder a query once a slice has been
            # taken") -- confirm whether this branch is ever exercised.
            queryset = queryset.order_by('-created_at', '-views')
            now = datetime.datetime.now()
            # Final hotness ranking is computed in python over the slice.
            queryset = sorted(queryset, key= lambda v: v.hot(now), reverse=True)
        expanded_serializer = ExpandedVideoSerializer(queryset, many=True, context={'request': request})
        return self.success_response(expanded_serializer.data)
    """
    Description:
        Upload a video, dawg!
        Given a video, name, concert_id, and a list of artist spotify_ids, create and upload a video!
    Request:
        POST /videos/
        NOTE: This is Multipart/form data!!!!
        The following fields are expected:
            file: The file itself
            name: The name of the video (user-entered)
            concert: The ID of the concert for this video
            artists: You will have one "artists" key/value pair for every artist on this video
                     yes, you may have this key MULTIPLE TIMES)
                     This value will be the spotify_id of the tagged artist
    Response:
        The fresh video data for the video, including serialized artists and user!
        {
          "id": 49,
          "name": "drewww",
          "uploaded": false,
          "uuid": "dfef3693-a42f-4444-b03c-8f64e46d6b02",
          "length": null,
          "file_size": 38391947,
          "is_private": false,
          "views": 0,
          "artists": [
            {
              "id": 1,
              "name": "Bonobo",
              "spotify_id": "0cmWgDlu9CwTgxPhf403hb",
              "genres": [
                "chill-out",
                "downtempo",
                "ninja",
                "nu jazz",
                "trip hop"
              ],
              "images": [
                {
                  "url": "https://i.scdn.co/image/10e789fe4259875a0bb7f5a41f13a2c5815b4635",
                  "height": 667,
                  "width": 1000
                },
                {
                  "url": "https://i.scdn.co/image/47ca8ff0c123abac4e424fa203c9bdd14685c69e",
                  "height": 427,
                  "width": 640
                },
                {
                  "url": "https://i.scdn.co/image/1478b2e2861c22dcfa152e67581b41659ea02b47",
                  "height": 133,
                  "width": 200
                },
                {
                  "url": "https://i.scdn.co/image/165e00daafa1ae302a549c01a7a50d59e3583fb1",
                  "height": 43,
                  "width": 64
                }
              ],
              "unofficial": false
            }
          ],
          "web_src": null,
          "hls_src": null,
          "thumb_src": null,
          "concert": 1,
          "user": {
            "id": 1,
            "username": "test",
            "email": "test@user.com",
            "first_name": "Test",
            "last_name": "User",
            "full_name": "Test User"
          }
        }
    """
    @authenticate
    def post(self, request):
        # Make sure we have all of the proper attributes
        context = self.get_serializer_context()
        self.serializer = self.serializer_class(data=request.data, context=context)
        if not self.serializer.is_valid():
            return self.error_response(self.serializer.errors, 400)
        video_fh = self.serializer.validated_data.pop('file')
        # Create the video object so we can get the UUID and paths
        video = self.serializer.save()
        # This will synchronously upload the video to a temp directory then
        # queue a job to:
        # 1) transcode the video for ios and web
        # 2) upload the video to s3
        #
        # both of these things happen outside of the realm of this request!
        try:
            tmp_src = video.process_upload(video_fh)
        except Exception, e:
            # Upload failed: roll back the half-created Video row before
            # reporting the error (python 2 ``except`` syntax).
            video.delete()
            return self.error_response(str(e), 400)
        # tmp_src is where these are stored on disk pending transcode + s3 upload
        # request.data['tmp_src'] = video_paths['tmp_src']
        # request.data['hls_src'] = video_paths['hls_src']
        # request.data['web_src'] = video_paths['web_src']
        # request.data['thumb_src'] = video_paths['thumb_src']
        # do this async. TODO : change lilo to use Integers for the video_id field
        transcode_video.delay(video.id)
        expanded_serializer = ExpandedVideoSerializer(video, context={'request': request})
        return self.success_response(expanded_serializer.data)
class VideoDetailsView(BaseView):
    # Retrieve / update / delete a single video by id.
    serializer_class = VideoSerializer
    model = Video
    @authenticate
    def get(self, request, id):
        # Attempt to get the video
        # ``all_objects`` (rather than ``objects``) presumably bypasses a
        # default visibility manager -- TODO confirm against the model.
        self.video = self.get_object_or_404(Video.all_objects, id=id)
        # Hide private videos from everyone but their owner, and hide videos
        # whose upload has not finished.  NOTE(review): ``uploaded == False``
        # is not the same as ``not self.video.uploaded`` if the field can be
        # NULL -- keep the explicit comparison unless that is ruled out.
        if (self.video.is_private and request.user.id != self.video.user.id) or (self.video.uploaded == False):
            return self.error_response("Video with id {} not found".format(id), 404)
        # Serialize the result and return it
        self.serializer = JamJarVideoSerializer(self.video, context={'request': request})
        return self.success_response(self.serializer.data)
    @authenticate
    def put(self, request, id):
        # Attempt to get the video
        self.video = self.get_object_or_404(self.model, id=id)
        # Initialize the serializer with our data
        self.serializer = self.get_serializer(self.video, data=request.data, context={'request': request})
        # Validate the data
        if not self.serializer.is_valid():
            return self.error_response(self.serializer.errors, 400)
        # Errthang looks good. Save it to the db
        video = self.serializer.save()
        return self.success_response(self.serializer.data)
    @authenticate
    def delete(self, request, id):
        # Attempt to get the video
        self.video = self.get_object_or_404(self.model, id=id)
        # Hard-delete and confirm.
        self.video.delete()
        return self.success_response("Video with id {} successfully deleted.".format(id))
class VideoWatchView(BaseView):
    model = Video
    """
    Description:
        Given a video id, incremement that video count. We want to make this
        endpoint as cheap as possible, so we do some funky stuff here.
          - We don't authenticate
          - We use an F expression to both find and update the row in the DB
            at the same time
    Request:
        POST /videos/:video_id/watching/
        {}
        (No data needed)
    Response:
        True
    """
    # Don't authenticate this
    #@authenticate
    def post(self, request, id):
        # Attempt to update the video count
        # F('views') + 1 performs the increment inside the database, avoiding
        # a read-modify-write race.  A missing id is silently a no-op.
        self.model.objects.filter(id=id).update(views=F('views')+1)
        return self.success_response(True)
class VideoFlagView(BaseView):
    model = VideoFlag
    serializer_class = VideoFlagSerializer
    """
    Description:
        Get a list of all video flags
        Flag types:
          'Q' - 'Quality'
          'I' - 'Inappropriate'
          'A' - 'Accuracy'
    Request:
        GET /videos/flags/
    Response:
        All of the video flags
    """
    @authenticate
    def get(self, request):
        # Serialize every flag in the system.
        queryset = VideoFlag.objects.all()
        serializer = VideoFlagSerializer(queryset, many=True)
        self.serializer = serializer
        return self.success_response(serializer.data)
    """
    Description:
        Given a video ID, flag type, and note, submit a flag for a video
        Flag types:
          'Q' - 'Quality'
          'I' - 'Inappropriate'
          'A' - 'Accuracy'
          'U' - 'Report User'
    Request:
        POST /videos/flags/
        {
          "video": 3,
          "flag_type": "I",
          "notes": "There are boobs in this video!!"
        }
    Response:
        The newly created video flag
    """
    @authenticate
    def post(self, request):
        # Validate the submitted flag payload.
        serializer = self.get_serializer(data=request.data)
        self.serializer = serializer
        if not serializer.is_valid():
            return self.error_response(serializer.errors, 400)
        # A duplicate flag violates a DB constraint; surface it as a 400.
        try:
            serializer.save()
        except IntegrityError as e:
            return self.error_response(str(e), 400)
        return self.success_response(serializer.data)
class VideoVoteView(BaseView):
    model = VideoVote
    serializer_class = VideoVoteSerializer
    """
    Description:
        Given a boolean (true=upvote, false=downvote, null=unvote), record a user's vote for a video
    Request:
        POST /videos/vote/
        {
          vote: true/false/null,
          video: video_id
        }
    Response:
        True
    """
    @authenticate
    def post(self, request):
        serializer = self.get_serializer(data=request.data)
        self.serializer = serializer
        if not serializer.is_valid():
            return self.error_response(serializer.errors, 400)
        validated = serializer.validated_data
        # One vote row per (user, video); re-voting just updates it.
        VideoVote.objects.update_or_create(
            user_id=validated['user_id'],
            video_id=validated['video'].id,
            defaults={'vote': validated['vote']},
        )
        return self.success_response(True)
class JamPickView(BaseView):
    serializer_class = VideoSerializer
    """
    Description:
        Get a list of curated videos, curated by the dopest curration team everrr
    Request:
        GET /videos/jampicks/
    Response:
        A list of all current jampicks
    """
    # @authenticate
    @silk_profile(name='Get JamPicks')
    def get(self, request):
        # Only videos with a JamPick row attached (non-null reverse FK),
        # in curation order, with the related objects pre-fetched.
        queryset = (
            Video.objects.for_user(request.user)
            .filter(jampick__isnull=False)
            .select_related('user', 'concert', 'concert__venue')
            .prefetch_related('artists', 'artists__genres', 'artists__images', 'concert__artists')
            .order_by('jampick__id')
        )
        serializer = ExpandedVideoSerializer(queryset, many=True, context={'request': request})
        return self.success_response(serializer.data)
| |
import logging
from typing import Iterable, Optional, List
from collections import defaultdict
from ..order.ordertype import OrderTypes
from ..utils import (
calculate_unmatched_exposure,
calculate_matched_exposure,
STRATEGY_NAME_HASH_LENGTH,
)
from ..order.order import BaseOrder, OrderStatus
logger = logging.getLogger(__name__)
# https://www.betfair.com/aboutUs/Betfair.Charges/#charges6
IMPLIED_COMMISSION_RATE = 0.03
# Orders in these states never reached (or will never reach) the exchange,
# so they are skipped when calculating exposures.
PENDING_STATUS = [
    OrderStatus.PENDING,
    OrderStatus.VIOLATION,
    OrderStatus.EXPIRED,
]
class Blotter:
    """
    Simple and fast class to hold all orders for
    a particular market.

    `customer_order_ref` used as the key and various
    caches available for faster access::

        blotter["abc"] = <Order>        # set
        "abc" in blotter                # contains
        orders = [o for o in blotter]   # iter
        order = blotter["abc"]          # get
    """
    def __init__(self, market_id: str):
        self.market_id = market_id
        # Becomes True once the first order is added (see __setitem__).
        self.active = False
        self._orders = {}  # {Order.id: Order}
        # cached lists/dicts for faster lookup
        self._trades = defaultdict(list)  # {Trade.id: [Order,]}
        self._bet_id_lookup = {}  # {Order.bet_id: Order, }
        self._live_orders = []
        self._strategy_orders = defaultdict(list)
        self._strategy_selection_orders = defaultdict(list)
    def get_order_bet_id(self, bet_id: str) -> Optional[BaseOrder]:
        """Return the order with the given bet id, or None if unknown."""
        try:
            return self._bet_id_lookup[bet_id]
        except KeyError:
            return
    def strategy_orders(
        self,
        strategy,
        order_status: Optional[List[OrderStatus]] = None,
        matched_only: Optional[bool] = None,
    ) -> list:
        """Returns all orders related to a strategy."""
        orders = self._strategy_orders[strategy]
        if order_status:
            orders = [o for o in orders if o.status in order_status]
        if matched_only:
            orders = [o for o in orders if o.size_matched > 0]
        return orders
    def strategy_selection_orders(
        self,
        strategy,
        selection_id: int,
        handicap: float = 0,
        order_status: Optional[List[OrderStatus]] = None,
        matched_only: Optional[bool] = None,
    ) -> list:
        """Returns all orders related to a strategy selection."""
        orders = self._strategy_selection_orders[(strategy, selection_id, handicap)]
        if order_status:
            orders = [o for o in orders if o.status in order_status]
        if matched_only:
            orders = [o for o in orders if o.size_matched > 0]
        return orders
    @property
    def live_orders(self) -> Iterable:
        # Iterate over a snapshot copy so callers can safely mutate the
        # blotter (e.g. complete orders) while iterating.
        return iter(list(self._live_orders))
    @property
    def has_live_orders(self) -> bool:
        return bool(self._live_orders)
    def process_closed_market(self, market_book) -> None:
        """Copy final runner/market results onto each order at market close."""
        number_of_winners = len(
            [runner for runner in market_book.runners if runner.status == "WINNER"]
        )
        for order in self:
            for runner in market_book.runners:
                if (order.selection_id, order.handicap) == (
                    runner.selection_id,
                    runner.handicap,
                ):
                    order.runner_status = runner.status
                    order.market_type = market_book.market_definition.market_type
                    order.each_way_divisor = (
                        market_book.market_definition.each_way_divisor
                    )
                    # More winners than expected implies a dead heat;
                    # record the count for downstream settlement maths.
                    if number_of_winners > market_book.number_of_winners:
                        order.number_of_dead_heat_winners = number_of_winners
    def process_cleared_orders(self, cleared_orders) -> list:
        """Attach cleared-order data to matching orders; return all orders."""
        for cleared_order in cleared_orders.orders:
            # customer_order_ref is "<strategy name hash>-<order id>";
            # strip the hash prefix (and separator) to recover the order id.
            order_id = cleared_order.customer_order_ref[STRATEGY_NAME_HASH_LENGTH + 1 :]
            if order_id in self:
                self[order_id].cleared_order = cleared_order
        return [order for order in self]
    """ position """
    def market_exposure(self, strategy, market_book) -> float:
        """Returns worst-case exposure for market, which is the maximum potential loss (negative),
        arising from the worst race outcome, or the minimum potential profit (positive).
        """
        orders = self.strategy_orders(strategy)
        runners = set([order.lookup for order in orders])
        worst_possible_profits = [
            self.get_exposures(strategy, lookup) for lookup in runners
        ]
        worst_possible_profits_on_loses = [
            wpp["worst_possible_profit_on_lose"] for wpp in worst_possible_profits
        ]
        # Per runner: how much worse (or better) a win is than a loss.
        # Runners we hold no orders on contribute a difference of zero.
        differences = [
            wpp["worst_possible_profit_on_win"] - wpp["worst_possible_profit_on_lose"]
            for wpp in worst_possible_profits
        ] + (market_book.number_of_active_runners - len(runners)) * [0]
        # Assume the n winners are the n runners that hurt us the most.
        worst_differences = sorted(differences)[: market_book.number_of_winners]
        return sum(worst_possible_profits_on_loses) + sum(worst_differences)
    def selection_exposure(self, strategy, lookup: tuple) -> float:
        """Returns strategy/selection exposure, which is the worse-case loss arising
        from the selection either winning or losing. Can be positive or zero.
            positive = potential loss
            zero = no potential loss
        """
        exposures = self.get_exposures(strategy=strategy, lookup=lookup)
        exposure = -min(
            exposures["worst_possible_profit_on_win"],
            exposures["worst_possible_profit_on_lose"],
        )
        # A guaranteed profit is clamped to zero exposure.
        return max(exposure, 0.0)
    def get_exposures(self, strategy, lookup: tuple, exclusion=None) -> dict:
        """Returns strategy/selection exposures as a dict.

        ``exclusion`` allows one order to be ignored (e.g. when sizing a
        replacement for it).  Pending/violated/expired orders are skipped.
        """
        mb, ml = [], []  # matched bets, (price, size)
        ub, ul = [], []  # unmatched bets, (price, size)
        moc_win_liability = 0.0
        moc_lose_liability = 0.0
        for order in self.strategy_selection_orders(strategy, *lookup[1:]):
            if order == exclusion:
                continue
            if order.status in PENDING_STATUS:
                continue
            if order.order_type.ORDER_TYPE == OrderTypes.LIMIT:
                _size_matched = order.size_matched  # cache
                if _size_matched:
                    if order.side == "BACK":
                        mb.append((order.average_price_matched, _size_matched))
                    else:
                        ml.append((order.average_price_matched, _size_matched))
                if not order.complete:
                    _size_remaining = order.size_remaining  # cache
                    # Only priced, still-open remainders count as unmatched.
                    if order.order_type.price and _size_remaining:
                        if order.side == "BACK":
                            ub.append((order.order_type.price, _size_remaining))
                        else:
                            ul.append((order.order_type.price, _size_remaining))
            elif order.order_type.ORDER_TYPE in (
                OrderTypes.LIMIT_ON_CLOSE,
                OrderTypes.MARKET_ON_CLOSE,
            ):
                # On-close bets: a BACK loses its liability if the selection
                # loses; a LAY loses its liability if the selection wins.
                if order.side == "BACK":
                    moc_lose_liability -= order.order_type.liability
                else:
                    moc_win_liability -= order.order_type.liability
            else:
                raise ValueError(
                    "Unexpected order type: %s" % order.order_type.ORDER_TYPE
                )
        matched_exposure = calculate_matched_exposure(mb, ml)
        unmatched_exposure = calculate_unmatched_exposure(ub, ul)
        worst_possible_profit_on_win = (
            matched_exposure[0] + unmatched_exposure[0] + moc_win_liability
        )
        worst_possible_profit_on_lose = (
            matched_exposure[1] + unmatched_exposure[1] + moc_lose_liability
        )
        return {
            "matched_profit_if_win": matched_exposure[0],
            "matched_profit_if_lose": matched_exposure[1],
            "worst_potential_unmatched_profit_if_win": unmatched_exposure[0],
            "worst_potential_unmatched_profit_if_lose": unmatched_exposure[1],
            "worst_possible_profit_on_win": worst_possible_profit_on_win,
            "worst_possible_profit_on_lose": worst_possible_profit_on_lose,
        }
    """ getters / setters """
    def complete_order(self, order) -> None:
        # NOTE(review): raises ValueError if the order is not currently live
        # -- looks intentional (double-completion would be a bug); confirm.
        self._live_orders.remove(order)
    def has_order(self, customer_order_ref: str) -> bool:
        return customer_order_ref in self._orders
    def has_trade(self, trade_id: str) -> bool:
        return trade_id in self._trades
    __contains__ = has_order
    def __setitem__(self, customer_order_ref: str, order) -> None:
        """Add an order and register it in every lookup cache."""
        self.active = True
        self._orders[customer_order_ref] = order
        self._bet_id_lookup[order.bet_id] = order
        self._live_orders.append(order)
        self._trades[order.trade.id].append(order)
        self._strategy_orders[order.trade.strategy].append(order)
        self._strategy_selection_orders[
            (order.trade.strategy, *order.lookup[1:])
        ].append(order)
    def __getitem__(self, customer_order_ref: str):
        return self._orders[customer_order_ref]
    def __iter__(self) -> Iterable[BaseOrder]:
        # Snapshot copy: safe to add/complete orders during iteration.
        return iter(list(self._orders.values()))
    def __len__(self) -> int:
        return len(self._orders)
| |
# coding: utf-8
# Deep Learning
# =============
#
# Assignment 4
# ------------
#
# Previously in `2_fullyconnected.ipynb` and `3_regularization.ipynb`, we trained fully connected networks to classify [notMNIST](http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html) characters.
#
# The goal of this assignment is make the neural network convolutional.
# In[ ]:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range
import argparse
import neuralNetwork as nn
import os
from datetime import datetime
print("Tensorflow version:", tf.__version__)
# Command-line interface: data location, output locations and training
# hyper-parameters for the classifier.
parser = argparse.ArgumentParser(description='Shape Variation Analyzer', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--pickle', help='Pickle file, check the script pickleData to generate this file.', required=True)
parser.add_argument('--out', help='Output dirname, default=./out', default="./out")
parser.add_argument('--model', help='Output modelname, default=model, the output name will be <outdir>/model-<num step>', default="model")
parser.add_argument('--learning_rate', help='Learning rate, default=1e-5', type=float, default=1e-5)
parser.add_argument('--decay_rate', help='decay rate, default=0.96', type=float, default=0.96)
parser.add_argument('--decay_steps', help='decay steps, default=10000', type=int, default=10000)
parser.add_argument('--batch_size', help='Batch size for evaluation, default=32', type=int, default=32)
parser.add_argument('--reg_constant', help='Regularization constant, default=0.0', type=float, default=0.0)
parser.add_argument('--num_epochs', help='Number of epochs', type=int, default=10)
parser.add_argument('--num_labels', help='Number of labels', type=int, default=7)
args = parser.parse_args()
# Unpack the parsed arguments into module-level names used throughout
# the rest of the script.
pickle_file = args.pickle
outvariablesdirname = args.out
modelname = args.model
learning_rate = args.learning_rate
decay_rate = args.decay_rate
decay_steps = args.decay_steps
batch_size = args.batch_size
num_epochs = args.num_epochs
reg_constant = args.reg_constant
num_labels = args.num_labels
# Load the pickled training/validation datasets produced by pickleData.
# A context manager guarantees the file handle is closed even if
# unpickling fails (the original opened the file and never closed it).
with open(pickle_file, 'rb') as f:
    data = pickle.load(f)
train_dataset = data["train_dataset"]
train_labels = data["train_labels"]
valid_dataset = data["valid_dataset"]
valid_labels = data["valid_labels"]
# test_dataset = data["test_dataset"]
# test_labels = data["test_labels"]
# img_head = data["img_head"]
# img_size = img_head["sizes"]
# img_size = [img_size[3], img_size[2], img_size[1], img_size[0]]
# Reformat into a TensorFlow-friendly shape:
# - convolutions need the image data formatted as a cube (width by height by #channels)
# - labels as float 1-hot encodings.
# In[ ]:
# in_depth = img_size[3] #zdim
# in_height = img_size[2] #ydim
# in_width = img_size[1] #xdim
# num_channels = img_size[0] #numchannels
# num_channels_labels = 1
# Reformat into a TensorFlow-friendly shape:
# - convolutions need the image data formatted as a cube (depth * width * height * channels)
# - The image data is assumed to come from the NRRD format
# - labels as float 1-hot encodings.
def reformat(dataset, labels, num_classes=None):
    """Flatten each sample to 1-D and one-hot encode the labels.

    Args:
      dataset: array of shape (num_samples, ...); all trailing axes are
        flattened into a single feature axis.
      labels: integer class ids, shape (num_samples,).
      num_classes: number of label classes.  Defaults to the module-level
        ``num_labels`` parsed from the command line, so existing two-argument
        calls behave exactly as before.

    Returns:
      (dataset, labels): the flattened data and float32 one-hot labels.
    """
    if num_classes is None:
        num_classes = num_labels  # module-level CLI argument (backward compat)
    dataset = dataset.reshape(dataset.shape[0], -1)
    # Map class id k to a one-hot row, e.g. 2 -> [0, 0, 1, 0, ...].
    labels = (np.arange(num_classes) == labels[:, None]).astype(np.float32)
    return dataset, labels
# Apply the same reformatting (flatten + one-hot) to both splits.
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
# test_dataset, test_labels = reformat(test_dataset, test_labels)
# Flattened feature-vector length per sample.
size_features = train_dataset.shape[1]
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
# print('Test set', test_dataset.shape, test_labels.shape)
# Echo the effective hyper-parameters for the log.
print('learning_rate', learning_rate)
print('decay_rate', decay_rate)
print('decay_steps', decay_steps)
print('batch_size', batch_size)
print('num_epochs', num_epochs)
# Let's build a small network with two convolutional layers, followed by one fully connected layer. Convolutional networks are more expensive computationally, so we'll limit its depth and number of fully connected nodes.
# In[ ]:
#batch_size = 64
# patch_size = 8
# depth = 32
# depth2 = 64
# num_hidden = 256
# stride = [1, 1, 1, 1]
# def evaluate_accuracy(prediction, labels):
#     accuracy = tf.reduce_sum(tf.squared_difference(prediction, labels))
#     return accuracy.eval()
# Build the TF1 computation graph: input pipelines, inference, loss,
# optimizer and TensorBoard summaries all live inside this graph.
graph = tf.Graph()
with graph.as_default():
    # run inference on the input data
    # Training pipeline: repeat for num_epochs, then batch.
    tf_train_dataset = tf.data.Dataset.from_tensor_slices(train_dataset)
    tf_train_labels = tf.data.Dataset.from_tensor_slices(train_labels)
    dataset = tf.data.Dataset.zip((tf_train_dataset, tf_train_labels))
    dataset = dataset.repeat(args.num_epochs)
    dataset = dataset.batch(batch_size)
    iterator = dataset.make_initializable_iterator()
    next_train_data, next_train_labels = iterator.get_next()
    # Validation pipeline: a single pass per evaluation.
    # NOTE(review): the next lines rebind `valid_dataset` from the NumPy
    # array to a tf.data.Dataset, shadowing the earlier variable.
    tf_valid_dataset = tf.data.Dataset.from_tensor_slices(valid_dataset)
    tf_valid_labels = tf.data.Dataset.from_tensor_slices(valid_labels)
    valid_dataset = tf.data.Dataset.zip((tf_valid_dataset, tf_valid_labels))
    valid_dataset = valid_dataset.repeat(1)
    valid_dataset = valid_dataset.batch(batch_size)
    valid_iterator = valid_dataset.make_initializable_iterator()
    next_valid_data, next_valid_labels = valid_iterator.get_next()
    # Feeds for features, one-hot labels, and the dropout keep probability.
    x = tf.placeholder(tf.float32,shape=(None, size_features))
    y_ = tf.placeholder(tf.float32, shape=(None, num_labels))
    keep_prob = tf.placeholder(tf.float32)
    #tf_valid_dataset = tf.constant(valid_dataset)
    # tf_test_dataset = tf.constant(test_dataset)
    y_conv = nn.inference(x, size_features, num_labels=num_labels, keep_prob=keep_prob, batch_size=batch_size, is_training=True)
    # calculate the loss from the results of inference and the labels
    loss = nn.loss(y_conv, y_)
    tf.summary.scalar(loss.op.name, loss)
    #intersection_sum, label_sum, example_sum, precision = nn.evaluation(y_conv, y_)
    #tf.summary.scalar ("Precision op", precision)
    # setup the training operations
    #train_step = nn.training(loss, learning_rate, decay_steps, decay_rate)
    # setup the summary ops to use TensorBoard
    # setup the training operations
    #train_step = nn.training(loss, learning_rate, decay_steps, decay_rate)
    train_step = nn.training(loss, learning_rate, decay_steps, decay_rate)
    # Evaluation metrics on the softmaxed logits.  The [0]/[1] indexing
    # below suggests each metric is a (value, update_op) pair in the
    # tf.metrics style -- TODO confirm against neuralNetwork.py.
    logits = tf.nn.softmax(y_conv)
    accuracy_eval = nn.evaluation(logits, y_)
    tf.summary.scalar("accuracy_0", accuracy_eval[0])
    tf.summary.scalar("accuracy_1", accuracy_eval[1])
    auc_eval,fn_eval,fp_eval,tn_eval,tp_eval = nn.metrics(logits, y_)
    tf.summary.scalar("auc_0", auc_eval[0])
    tf.summary.scalar("auc_1", auc_eval[1])
    tf.summary.scalar("fn_eval", fn_eval[1])
    tf.summary.scalar("fp_eval", fp_eval[1])
    tf.summary.scalar("tn_eval", tn_eval[1])
    tf.summary.scalar("tp_eval", tp_eval[1])
    summary_op = tf.summary.merge_all()
    # intersection_sum, label_sum, example_sum = evaluation(y_conv, y_)
    # valid_prediction = model(tf_valid_dataset)
    #cross_entropy = tf.reduce_sum(tf.squared_difference(y_conv, y_))
    #regularizers = tf.nn.l2_loss(W_fc1) + tf.nn.l2_loss(W_fc2)
    #cross_entropy += 0.1 * regularizers
    #cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_conv, y_))
    #train_step = tf.train.GradientDescentOptimizer(1e-4).minimize(cross_entropy)
    # accuracy = cross_entropy
    # valid_prediction = model(tf_valid_dataset)
    # evaluation(valid_prediction)
    # test_prediction = model(tf_test_dataset)
    # The session runs inside the graph context so tf.train.Saver() and the
    # initializers can find the variables created above.
    with tf.Session() as sess:
        sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
        saver = tf.train.Saver()
        # specify where to write the log files for import to TensorBoard
        now = datetime.now()
        summary_writer = tf.summary.FileWriter(os.path.join(outvariablesdirname, now.strftime("%Y%m%d-%H%M%S")), sess.graph)
        sess.run([iterator.initializer])
        step = 0
        # Train until the (num_epochs-times repeated) dataset is exhausted.
        while True:
            try:
                batch_data, batch_labels = sess.run([next_train_data, next_train_labels])
                _, loss_value, summary, accuracy, auc = sess.run([train_step, loss, summary_op, accuracy_eval, auc_eval], feed_dict={x: batch_data, y_: batch_labels, keep_prob: 0.5})
                if step % 100 == 0:
                    print('OUTPUT: Step %d: loss = %.3f' % (step, loss_value))
                    print('Accuracy = %.3f, Auc = %.3f ' % (accuracy[0], auc[0]))
                    # output some data to the log files for tensorboard
                    summary_writer.add_summary(summary, step)
                    summary_writer.flush()
                # less frequently output checkpoint files. Used for evaluating the model
                if step % 1000 == 0:
                    save_path = saver.save(sess, os.path.join(outvariablesdirname, modelname), global_step=step)
                    # One full pass over the validation set (dropout off).
                    # NOTE(review): this rebinds `accuracy`/`auc` from the
                    # training values computed above.
                    sess.run([valid_iterator.initializer])
                    while True:
                        try:
                            batch_valid_data, batch_valid_labels = sess.run([next_valid_data, next_valid_labels])
                            _, accuracy, auc = sess.run([y_conv, accuracy_eval, auc_eval], feed_dict={x: batch_valid_data, y_: batch_valid_labels, keep_prob: 1})
                            print('Validation accuracy = %.3f, Auc = %.3f ' % (accuracy[0], auc[0]))
                        except tf.errors.OutOfRangeError:
                            break
                step += 1
            except tf.errors.OutOfRangeError:
                break
        # Final checkpoint after training completes.
        outmodelname = os.path.join(outvariablesdirname, modelname)
        print('Step:', step)
        print('Saving model:', outmodelname)
        saver.save(sess, outmodelname, global_step=step)
| |
"""
TestCmd.py: a testing framework for commands and scripts.
The TestCmd module provides a framework for portable automated testing of
executable commands and scripts (in any language, not just Python), especially
commands and scripts that require file system interaction.
In addition to running tests and evaluating conditions, the TestCmd module
manages and cleans up one or more temporary workspace directories, and provides
methods for creating files and directories in those workspace directories from
in-line data (here-documents), allowing tests to be completely self-contained.
A TestCmd environment object is created via the usual invocation:
test = TestCmd()
The TestCmd module provides pass_test(), fail_test(), and no_result() unbound
methods that report test results for use with the Aegis change management
system. These methods terminate the test immediately, reporting PASSED, FAILED
or NO RESULT respectively and exiting with status 0 (success), 1 or 2
respectively. This allows for a distinction between an actual failed test and a
test that could not be properly evaluated because of an external condition (such
as a full file system or incorrect permissions).
"""
# Copyright 2000 Steven Knight
# This module is free software, and you may redistribute it and/or modify
# it under the same terms as Python itself, so long as this copyright message
# and disclaimer are retained in their original form.
#
# IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
# THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
# AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
# Copyright 2002-2003 Vladimir Prus.
# Copyright 2002-2003 Dave Abrahams.
# Copyright 2006 Rene Rivera.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
from string import join, split
__author__ = "Steven Knight <knight@baldmt.com>"
__revision__ = "TestCmd.py 0.D002 2001/08/31 14:56:12 software"
__version__ = "0.02"
from types import *
import os
import os.path
import re
import shutil
import stat
import subprocess
import sys
import tempfile
import traceback
# Prefix for the temporary directories created by this module.
tempfile.template = 'testcmd.'

# TestCmd instances whose workspaces still need cleaning up at exit.
_Cleanup = []

def _clean():
    """Run cleanup() on every registered TestCmd instance, newest first."""
    global _Cleanup
    list = _Cleanup[:]
    _Cleanup = []
    list.reverse()
    for test in list:
        test.cleanup()

# NOTE(review): sys.exitfunc is the Python 2 exit-hook mechanism; the
# portable spelling is atexit.register(_clean).
sys.exitfunc = _clean
def caller(tblist, skip):
    """Format an extracted traceback into a human-readable call-site trail.

    Frames are taken from *tblist* (outermost first) until the first frame
    inside TestCmd.py, reversed so the innermost call comes first, and the
    first *skip* frames of the result are dropped.
    """
    frames = []
    for file, line, name, text in tblist:
        if file[-10:] == "TestCmd.py":
            break
        frames.insert(0, (file, line, name, text))
    result = ""
    prefix = "at"
    for file, line, name, text in frames[skip:]:
        # "?" is how anonymous (module-level) code is reported.
        suffix = "" if name == "?" else " (" + name + ")"
        result = result + ("%s line %d of %s%s\n" % (prefix, line, file, suffix))
        prefix = "\tfrom"
    return result
def fail_test(self=None, condition=True, function=None, skip=0):
    """Cause the test to fail.
    By default, the fail_test() method reports that the test FAILED and exits
    with a status of 1. If a condition argument is supplied, the test fails
    only if the condition is true.

    `function`, if supplied, is called before exiting; `skip` drops that
    many innermost frames from the reported call site.
    """
    if not condition:
        return
    if not function is None:
        function()
    of = ""
    desc = ""
    sep = " "
    if not self is None:
        if self.program:
            # string.join (Python 2 helper imported above): join the argv
            # list stored by program_set() into one display string.
            of = " of " + join(self.program, " ")
            sep = "\n\t"
        if self.description:
            desc = " [" + self.description + "]"
            sep = "\n\t"
    at = caller(traceback.extract_stack(), skip)
    sys.stderr.write("FAILED test" + of + desc + sep + at + """
in directory: """ + os.getcwd() )
    sys.exit(1)
def no_result(self=None, condition=True, function=None, skip=0):
    """Causes a test to exit with no valid result.
    By default, the no_result() method reports NO RESULT for the test and
    exits with a status of 2. If a condition argument is supplied, the test
    fails only if the condition is true.

    `function`, if supplied, is called before exiting; `skip` drops that
    many innermost frames from the reported call site.
    """
    if not condition:
        return
    if not function is None:
        function()
    of = ""
    desc = ""
    sep = " "
    if not self is None:
        if self.program:
            # Bug fix: self.program is a list (see program_set()/fail_test());
            # the original concatenated the list directly to a string, which
            # raises TypeError.  Join it the same way fail_test() does.
            of = " of " + join(self.program, " ")
            sep = "\n\t"
        if self.description:
            desc = " [" + self.description + "]"
            sep = "\n\t"
    at = caller(traceback.extract_stack(), skip)
    sys.stderr.write("NO RESULT for test" + of + desc + sep + at)
    sys.exit(2)
def pass_test(self=None, condition=True, function=None):
    """Causes a test to pass.
    By default, the pass_test() method reports PASSED for the test and exits
    with a status of 0. If a condition argument is supplied, the test passes
    only if the condition is true.

    `function`, if supplied, is called before exiting.
    """
    if not condition:
        return
    if not function is None:
        function()
    sys.stderr.write("PASSED\n")
    sys.exit(0)
class MatchError(object):
    """Falsy match result carrying a human-readable failure description.

    Instances evaluate as False in boolean context, so callers can treat a
    MatchError exactly like a failed match while still being able to read
    the explanatory `.message` attribute.
    """

    def __init__(self, message):
        self.message = message

    def __nonzero__(self):
        # Python 2 truth protocol.
        return False

    def __bool__(self):
        # Python 3 truth protocol.
        return False
def match_exact(lines=None, matches=None):
    """
    Returns whether the given lists or strings containing lines separated
    using newline characters contain exactly the same data.

    Returns 1 on an exact match, otherwise a falsy MatchError describing
    the first difference.
    """
    # Accept either a list of lines or a single newline-separated string.
    # (isinstance/str.split behave identically on Python 2 and 3, unlike
    # the old types.ListType / string.split helpers.)
    if not isinstance(lines, list):
        lines = lines.split("\n")
    if not isinstance(matches, list):
        matches = matches.split("\n")
    # Bug fix: the original returned a bare None whenever the lengths
    # differed, which made the "Missing lines"/"Extra lines" diagnostics
    # below unreachable dead code.  Fall through instead, so callers get a
    # useful MatchError -- exactly as match_re() already does.
    for i in range(min(len(lines), len(matches))):
        if lines[i] != matches[i]:
            return MatchError("Mismatch at line %d\n- %s\n+ %s\n" %
                (i+1, matches[i], lines[i]))
    if len(lines) < len(matches):
        return MatchError("Missing lines at line %d\n- %s" %
            (len(lines), "\n- ".join(matches[len(lines):])))
    if len(lines) > len(matches):
        return MatchError("Extra lines at line %d\n+ %s" %
            (len(matches), "\n+ ".join(lines[len(matches):])))
    return 1
def match_re(lines=None, res=None):
    """
    Given lists or strings contain lines separated using newline characters.
    This function matches those lines one by one, interpreting the lines in the
    res parameter as regular expressions.

    Returns 1 on a full match, otherwise a falsy MatchError describing the
    first difference.
    """
    # ListType and split() come from the Python 2 `types`/`string` module
    # imports at the top of this file.
    if not type(lines) is ListType:
        lines = split(lines, "\n")
    if not type(res) is ListType:
        res = split(res, "\n")
    for i in range(min(len(lines), len(res))):
        # Each expected pattern is anchored at both ends of its line.
        if not re.compile("^" + res[i] + "$").search(lines[i]):
            return MatchError("Mismatch at line %d\n- %s\n+ %s\n" %
                (i+1, res[i], lines[i]))
    if len(lines) < len(res):
        return MatchError("Missing lines at line %d\n- %s" %
            (len(lines), "\n- ".join(res[len(lines):])))
    if len(lines) > len(res):
        return MatchError("Extra lines at line %d\n+ %s" %
            (len(res), "\n+ ".join(lines[len(res):])))
    return 1
class TestCmd:
def __init__(self, description=None, program=None, workdir=None,
subdir=None, verbose=False, match=None, inpath=None):
self._cwd = os.getcwd()
self.description_set(description)
self.program_set(program, inpath)
self.verbose_set(verbose)
if match is None:
self.match_func = match_re
else:
self.match_func = match
self._dirlist = []
self._preserve = {'pass_test': 0, 'fail_test': 0, 'no_result': 0}
env = os.environ.get('PRESERVE')
if env:
self._preserve['pass_test'] = env
self._preserve['fail_test'] = env
self._preserve['no_result'] = env
else:
env = os.environ.get('PRESERVE_PASS')
if env is not None:
self._preserve['pass_test'] = env
env = os.environ.get('PRESERVE_FAIL')
if env is not None:
self._preserve['fail_test'] = env
env = os.environ.get('PRESERVE_PASS')
if env is not None:
self._preserve['PRESERVE_NO_RESULT'] = env
self._stdout = []
self._stderr = []
self.status = None
self.condition = 'no_result'
self.workdir_set(workdir)
self.subdir(subdir)
def __del__(self):
self.cleanup()
def __repr__(self):
return "%x" % id(self)
def cleanup(self, condition=None):
"""
Removes any temporary working directories for the specified TestCmd
environment. If the environment variable PRESERVE was set when the
TestCmd environment was created, temporary working directories are not
removed. If any of the environment variables PRESERVE_PASS,
PRESERVE_FAIL or PRESERVE_NO_RESULT were set when the TestCmd
environment was created, then temporary working directories are not
removed if the test passed, failed or had no result, respectively.
Temporary working directories are also preserved for conditions
specified via the preserve method.
Typically, this method is not called directly, but is used when the
script exits to clean up temporary working directories as appropriate
for the exit status.
"""
if not self._dirlist:
return
if condition is None:
condition = self.condition
if self._preserve[condition]:
for dir in self._dirlist:
print("Preserved directory %s" % dir)
else:
list = self._dirlist[:]
list.reverse()
for dir in list:
self.writable(dir, 1)
shutil.rmtree(dir, ignore_errors=1)
self._dirlist = []
self.workdir = None
os.chdir(self._cwd)
try:
global _Cleanup
_Cleanup.remove(self)
except (AttributeError, ValueError):
pass
def description_set(self, description):
"""Set the description of the functionality being tested."""
self.description = description
def fail_test(self, condition=True, function=None, skip=0):
"""Cause the test to fail."""
if not condition:
return
self.condition = 'fail_test'
fail_test(self = self,
condition = condition,
function = function,
skip = skip)
def match(self, lines, matches):
"""Compare actual and expected file contents."""
return self.match_func(lines, matches)
def match_exact(self, lines, matches):
"""Compare actual and expected file content exactly."""
return match_exact(lines, matches)
def match_re(self, lines, res):
"""Compare file content with a regular expression."""
return match_re(lines, res)
def no_result(self, condition=True, function=None, skip=0):
"""Report that the test could not be run."""
if not condition:
return
self.condition = 'no_result'
no_result(self = self,
condition = condition,
function = function,
skip = skip)
def pass_test(self, condition=True, function=None):
"""Cause the test to pass."""
if not condition:
return
self.condition = 'pass_test'
pass_test(self, condition, function)
def preserve(self, *conditions):
"""
Arrange for the temporary working directories for the specified
TestCmd environment to be preserved for one or more conditions. If no
conditions are specified, arranges for the temporary working
directories to be preserved for all conditions.
"""
if conditions is ():
conditions = ('pass_test', 'fail_test', 'no_result')
for cond in conditions:
self._preserve[cond] = 1
def program_set(self, program, inpath):
"""Set the executable program or script to be tested."""
if not inpath and program and not os.path.isabs(program[0]):
program[0] = os.path.join(self._cwd, program[0])
self.program = program
def read(self, file, mode='rb'):
"""
Reads and returns the contents of the specified file name. The file
name may be a list, in which case the elements are concatenated with
the os.path.join() method. The file is assumed to be under the
temporary working directory unless it is an absolute path name. The I/O
mode for the file may be specified and must begin with an 'r'. The
default is 'rb' (binary read).
"""
if type(file) is ListType:
file = apply(os.path.join, tuple(file))
if not os.path.isabs(file):
file = os.path.join(self.workdir, file)
if mode[0] != 'r':
raise ValueError, "mode must begin with 'r'"
return open(file, mode).read()
def run(self, program=None, arguments=None, chdir=None, stdin=None,
universal_newlines=True):
"""
Runs a test of the program or script for the test environment.
Standard output and error output are saved for future retrieval via the
stdout() and stderr() methods.
'universal_newlines' parameter controls how the child process
input/output streams are opened as defined for the same named Python
subprocess.POpen constructor parameter.
"""
if chdir:
if not os.path.isabs(chdir):
chdir = os.path.join(self.workpath(chdir))
if self.verbose:
sys.stderr.write("chdir(" + chdir + ")\n")
else:
chdir = self.workdir
cmd = []
if program and program[0]:
if program[0] != self.program[0] and not os.path.isabs(program[0]):
program[0] = os.path.join(self._cwd, program[0])
cmd += program
else:
cmd += self.program
if arguments:
cmd += arguments.split(" ")
if self.verbose:
sys.stderr.write(join(cmd, " ") + "\n")
p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=chdir,
universal_newlines=universal_newlines)
if stdin:
if type(stdin) is ListType:
stdin = "".join(stdin)
out, err = p.communicate(stdin)
self._stdout.append(out)
self._stderr.append(err)
self.status = p.returncode
if self.verbose:
sys.stdout.write(self._stdout[-1])
sys.stderr.write(self._stderr[-1])
def stderr(self, run=None):
"""
Returns the error output from the specified run number. If there is
no specified run number, then returns the error output of the last run.
If the run number is less than zero, then returns the error output from
that many runs back from the current run.
"""
if not run:
run = len(self._stderr)
elif run < 0:
run = len(self._stderr) + run
run -= 1
if run < 0:
return ''
return self._stderr[run]
def stdout(self, run=None):
"""
Returns the standard output from the specified run number. If there
is no specified run number, then returns the standard output of the
last run. If the run number is less than zero, then returns the
standard output from that many runs back from the current run.
"""
if not run:
run = len(self._stdout)
elif run < 0:
run = len(self._stdout) + run
run -= 1
if run < 0:
return ''
return self._stdout[run]
def subdir(self, *subdirs):
"""
Create new subdirectories under the temporary working directory, one
for each argument. An argument may be a list, in which case the list
elements are concatenated using the os.path.join() method.
Subdirectories multiple levels deep must be created using a separate
argument for each level:
test.subdir('sub', ['sub', 'dir'], ['sub', 'dir', 'ectory'])
Returns the number of subdirectories actually created.
"""
count = 0
for sub in subdirs:
if sub is None:
continue
if type(sub) is ListType:
sub = apply(os.path.join, tuple(sub))
new = os.path.join(self.workdir, sub)
try:
os.mkdir(new)
except:
pass
else:
count += 1
return count
def unlink(self, file):
"""
Unlinks the specified file name. The file name may be a list, in
which case the elements are concatenated using the os.path.join()
method. The file is assumed to be under the temporary working directory
unless it is an absolute path name.
"""
if type(file) is ListType:
file = apply(os.path.join, tuple(file))
if not os.path.isabs(file):
file = os.path.join(self.workdir, file)
os.unlink(file)
def verbose_set(self, verbose):
"""Set the verbose level."""
self.verbose = verbose
def workdir_set(self, path):
"""
Creates a temporary working directory with the specified path name.
If the path is a null string (''), a unique directory name is created.
"""
if os.path.isabs(path):
self.workdir = path
else:
if path != None:
if path == '':
path = tempfile.mktemp()
if path != None:
os.mkdir(path)
self._dirlist.append(path)
global _Cleanup
try:
_Cleanup.index(self)
except ValueError:
_Cleanup.append(self)
# We would like to set self.workdir like this:
# self.workdir = path
# But symlinks in the path will report things differently from
# os.getcwd(), so chdir there and back to fetch the canonical
# path.
cwd = os.getcwd()
os.chdir(path)
self.workdir = os.getcwd()
os.chdir(cwd)
else:
self.workdir = None
def workpath(self, *args):
"""
Returns the absolute path name to a subdirectory or file within the
current temporary working directory. Concatenates the temporary working
directory name with the specified arguments using os.path.join().
"""
return apply(os.path.join, (self.workdir,) + tuple(args))
def writable(self, top, write):
"""
Make the specified directory tree writable (write == 1) or not
(write == None).
"""
def _walk_chmod(arg, dirname, names):
st = os.stat(dirname)
os.chmod(dirname, arg(st[stat.ST_MODE]))
for name in names:
fullname = os.path.join(dirname, name)
st = os.stat(fullname)
os.chmod(fullname, arg(st[stat.ST_MODE]))
_mode_writable = lambda mode: stat.S_IMODE(mode|0200)
_mode_non_writable = lambda mode: stat.S_IMODE(mode&~0200)
if write:
f = _mode_writable
else:
f = _mode_non_writable
try:
os.path.walk(top, _walk_chmod, f)
except:
pass # Ignore any problems changing modes.
def write(self, file, content, mode='wb'):
"""
Writes the specified content text (second argument) to the specified
file name (first argument). The file name may be a list, in which case
the elements are concatenated using the os.path.join() method. The file
is created under the temporary working directory. Any subdirectories in
the path must already exist. The I/O mode for the file may be specified
and must begin with a 'w'. The default is 'wb' (binary write).
"""
if type(file) is ListType:
file = apply(os.path.join, tuple(file))
if not os.path.isabs(file):
file = os.path.join(self.workdir, file)
if mode[0] != 'w':
raise ValueError, "mode must begin with 'w'"
open(file, mode).write(content)
| |
from __future__ import absolute_import
from django.utils.timezone import get_fixed_timezone, utc
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.timestamp import ceiling_to_hour, ceiling_to_day, \
datetime_to_timestamp
from zerver.models import Realm, UserProfile, Client, get_realm, \
get_user_profile_by_email
from analytics.lib.counts import CountStat, COUNT_STATS
from analytics.lib.time_utils import time_range
from analytics.models import RealmCount, UserCount, BaseCount, \
FillState, last_successful_fill
from analytics.views import stats, get_chart_data, sort_by_totals, \
sort_client_labels, rewrite_client_arrays
from datetime import datetime, timedelta
import mock
import ujson
from six.moves import range
from typing import List, Dict
class TestStatsEndpoint(ZulipTestCase):
    """Smoke test for the /stats analytics page."""

    def test_stats(self):
        # type: () -> None
        """/stats should render successfully for a logged-in user."""
        self.user = get_user_profile_by_email('hamlet@zulip.com')
        self.login(self.user.email)
        result = self.client_get('/stats')
        self.assertEqual(result.status_code, 200)
        # Check that we get something back
        self.assert_in_response("Zulip Analytics for", result)
class TestGetChartData(ZulipTestCase):
    def setUp(self):
        # type: () -> None
        """Log in as hamlet and precompute hourly/daily bucket boundaries."""
        self.realm = get_realm('zulip')
        self.user = get_user_profile_by_email('hamlet@zulip.com')
        self.login(self.user.email)
        # Four consecutive hour/day bucket end times, starting at the first
        # bucket boundary after realm creation.
        self.end_times_hour = [ceiling_to_hour(self.realm.date_created) + timedelta(hours=i)
                               for i in range(4)]
        self.end_times_day = [ceiling_to_day(self.realm.date_created) + timedelta(days=i)
                              for i in range(4)]
def data(self, i):
# type: (int) -> List[int]
return [0, 0, i, 0]
    def insert_data(self, stat, realm_subgroups, user_subgroups):
        # type: (CountStat, List[str], List[str]) -> None
        """Insert count rows for *stat* and mark it as filled.

        Realm rows get values 100, 101, ... and user rows 200, 201, ...,
        one per subgroup, all at the third time bucket.
        """
        if stat.frequency == CountStat.HOUR:
            insert_time = self.end_times_hour[2]
            fill_time = self.end_times_hour[-1]
        # NOTE(review): if frequency is neither HOUR nor DAY, insert_time
        # is never bound and the bulk_create below raises NameError.
        if stat.frequency == CountStat.DAY:
            insert_time = self.end_times_day[2]
            fill_time = self.end_times_day[-1]
        RealmCount.objects.bulk_create([
            RealmCount(property=stat.property, subgroup=subgroup, end_time=insert_time,
                       value=100+i, realm=self.realm)
            for i, subgroup in enumerate(realm_subgroups)])
        UserCount.objects.bulk_create([
            UserCount(property=stat.property, subgroup=subgroup, end_time=insert_time,
                      value=200+i, realm=self.realm, user=self.user)
            for i, subgroup in enumerate(user_subgroups)])
        # Mark the stat as processed through fill_time so the chart-data
        # view will serve it.
        FillState.objects.create(property=stat.property, end_time=fill_time, state=FillState.DONE)
    def test_number_of_humans(self):
        # type: () -> None
        """Realm-level humans/bots chart over the daily buckets."""
        stat = COUNT_STATS['active_users:is_bot:day']
        self.insert_data(stat, ['true', 'false'], [])
        result = self.client_get('/json/analytics/chart_data',
                                 {'chart_name': 'number_of_humans'})
        self.assert_json_success(result)
        data = ujson.loads(result.content)
        # Subgroup 'true' (bots) received value 100, 'false' (humans) 101.
        self.assertEqual(data, {
            'msg': '',
            'end_times': [datetime_to_timestamp(dt) for dt in self.end_times_day],
            'frequency': CountStat.DAY,
            'interval': CountStat.GAUGE,
            'realm': {'bot': self.data(100), 'human': self.data(101)},
            'display_order': None,
            'result': 'success',
        })
    def test_messages_sent_over_time(self):
        # type: () -> None
        """Hourly messages chart includes both realm- and user-level series."""
        stat = COUNT_STATS['messages_sent:is_bot:hour']
        self.insert_data(stat, ['true', 'false'], ['false'])
        result = self.client_get('/json/analytics/chart_data',
                                 {'chart_name': 'messages_sent_over_time'})
        self.assert_json_success(result)
        data = ujson.loads(result.content)
        self.assertEqual(data, {
            'msg': '',
            'end_times': [datetime_to_timestamp(dt) for dt in self.end_times_hour],
            'frequency': CountStat.HOUR,
            'interval': CountStat.HOUR,
            'realm': {'bot': self.data(100), 'human': self.data(101)},
            'user': {'human': self.data(200)},
            'display_order': None,
            'result': 'success',
        })
    def test_messages_sent_by_message_type(self):
        # type: () -> None
        """Message-type chart: missing subgroups are zero-filled and a fixed
        display order is returned."""
        stat = COUNT_STATS['messages_sent:message_type:day']
        self.insert_data(stat, ['public_stream', 'private_message'],
                         ['public_stream', 'private_stream'])
        result = self.client_get('/json/analytics/chart_data',
                                 {'chart_name': 'messages_sent_by_message_type'})
        self.assert_json_success(result)
        data = ujson.loads(result.content)
        self.assertEqual(data, {
            'msg': '',
            'end_times': [datetime_to_timestamp(dt) for dt in self.end_times_day],
            'frequency': CountStat.DAY,
            'interval': CountStat.DAY,
            'realm': {'Public streams': self.data(100), 'Private streams': self.data(0),
                      'Private messages': self.data(101), 'Group private messages': self.data(0)},
            'user': {'Public streams': self.data(200), 'Private streams': self.data(201),
                     'Private messages': self.data(0), 'Group private messages': self.data(0)},
            'display_order': ['Private messages', 'Public streams', 'Private streams', 'Group private messages'],
            'result': 'success',
        })
    def test_messages_sent_by_client(self):
        # type: () -> None
        """Per-client chart groups clients by label and sorts by totals."""
        stat = COUNT_STATS['messages_sent:client:day']
        client1 = Client.objects.create(name='client 1')
        _client1 = Client.objects.create(name='_client 1')
        client2 = Client.objects.create(name='client 2')
        client3 = Client.objects.create(name='client 3')
        _client3 = Client.objects.create(name='_client 3')
        client4 = Client.objects.create(name='client 4')
        self.insert_data(stat, [client4.id, client3.id, client2.id],
                         [client1.id, _client1.id, client4.id, _client3.id])
        result = self.client_get('/json/analytics/chart_data',
                                 {'chart_name': 'messages_sent_by_client'})
        self.assert_json_success(result)
        data = ujson.loads(result.content)
        # The expected 401 = 200 + 201 suggests '_client x' rows are folded
        # into 'client x' (presumably by rewrite_client_arrays) -- TODO
        # confirm against analytics.views.
        self.assertEqual(data, {
            'msg': '',
            'end_times': [datetime_to_timestamp(dt) for dt in self.end_times_day],
            'frequency': CountStat.DAY,
            'interval': CountStat.DAY,
            'realm': {'client 4': self.data(100), 'client 3': self.data(101),
                      'client 2': self.data(102)},
            'user': {'client 1': self.data(401), 'client 4': self.data(202),
                     'client 3': self.data(203)},
            'display_order': ['client 1', 'client 2', 'client 3', 'client 4'],
            'result': 'success',
        })
    def test_include_empty_subgroups(self):
        # type: () -> None
        """With a FillState but no count rows, fixed subgroups are served as
        zero-filled series while dynamic (client) subgroups come back empty."""
        FillState.objects.create(
            property='active_users:is_bot:day', end_time=self.end_times_day[0], state=FillState.DONE)
        result = self.client_get('/json/analytics/chart_data',
                                 {'chart_name': 'number_of_humans'})
        self.assert_json_success(result)
        data = ujson.loads(result.content)
        self.assertEqual(data['realm'], {'human': [0], 'bot': [0]})
        # The humans chart has no user-level series at all.
        self.assertFalse('user' in data)
        FillState.objects.create(
            property='messages_sent:is_bot:hour', end_time=self.end_times_hour[0], state=FillState.DONE)
        result = self.client_get('/json/analytics/chart_data',
                                 {'chart_name': 'messages_sent_over_time'})
        self.assert_json_success(result)
        data = ujson.loads(result.content)
        self.assertEqual(data['realm'], {'human': [0], 'bot': [0]})
        self.assertEqual(data['user'], {})
        FillState.objects.create(
            property='messages_sent:message_type:day', end_time=self.end_times_day[0], state=FillState.DONE)
        result = self.client_get('/json/analytics/chart_data',
                                 {'chart_name': 'messages_sent_by_message_type'})
        self.assert_json_success(result)
        data = ujson.loads(result.content)
        self.assertEqual(data['realm'], {
            'Public streams': [0], 'Private streams': [0], 'Private messages': [0], 'Group private messages': [0]})
        self.assertEqual(data['user'], {
            'Public streams': [0], 'Private streams': [0], 'Private messages': [0], 'Group private messages': [0]})
        FillState.objects.create(
            property='messages_sent:client:day', end_time=self.end_times_day[0], state=FillState.DONE)
        result = self.client_get('/json/analytics/chart_data',
                                 {'chart_name': 'messages_sent_by_client'})
        self.assert_json_success(result)
        data = ujson.loads(result.content)
        self.assertEqual(data['realm'], {})
        self.assertEqual(data['user'], {})
def test_start_and_end(self):
# type: () -> None
stat = COUNT_STATS['active_users:is_bot:day']
self.insert_data(stat, ['true', 'false'], [])
end_time_timestamps = [datetime_to_timestamp(dt) for dt in self.end_times_day]
# valid start and end
result = self.client_get('/json/analytics/chart_data',
{'chart_name': 'number_of_humans',
'start': end_time_timestamps[1],
'end': end_time_timestamps[2]})
self.assert_json_success(result)
data = ujson.loads(result.content)
self.assertEqual(data['end_times'], end_time_timestamps[1:3])
self.assertEqual(data['realm'], {'bot': [0, 100], 'human': [0, 101]})
# start later then end
result = self.client_get('/json/analytics/chart_data',
{'chart_name': 'number_of_humans',
'start': end_time_timestamps[2],
'end': end_time_timestamps[1]})
self.assert_json_error_contains(result, 'Start time is later than')
def test_min_length(self):
# type: () -> None
stat = COUNT_STATS['active_users:is_bot:day']
self.insert_data(stat, ['true', 'false'], [])
# test min_length is too short to change anything
result = self.client_get('/json/analytics/chart_data',
{'chart_name': 'number_of_humans',
'min_length': 2})
self.assert_json_success(result)
data = ujson.loads(result.content)
self.assertEqual(data['end_times'], [datetime_to_timestamp(dt) for dt in self.end_times_day])
self.assertEqual(data['realm'], {'bot': self.data(100), 'human': self.data(101)})
# test min_length larger than filled data
result = self.client_get('/json/analytics/chart_data',
{'chart_name': 'number_of_humans',
'min_length': 5})
self.assert_json_success(result)
data = ujson.loads(result.content)
end_times = [ceiling_to_day(self.realm.date_created) + timedelta(days=i) for i in range(-1, 4)]
self.assertEqual(data['end_times'], [datetime_to_timestamp(dt) for dt in end_times])
self.assertEqual(data['realm'], {'bot': [0]+self.data(100), 'human': [0]+self.data(101)})
def test_non_existent_chart(self):
# type: () -> None
result = self.client_get('/json/analytics/chart_data',
{'chart_name': 'does_not_exist'})
self.assert_json_error_contains(result, 'Unknown chart name')
def test_analytics_not_running(self):
# type: () -> None
# try to get data for a valid chart, but before we've put anything in the database
# (e.g. before update_analytics_counts has been run)
with mock.patch('logging.warning'):
result = self.client_get('/json/analytics/chart_data',
{'chart_name': 'number_of_humans'})
self.assert_json_error_contains(result, 'No analytics data available')
class TestGetChartDataHelpers(ZulipTestCase):
    # last_successful_fill is in analytics/models.py, but get_chart_data is
    # the only function that uses it at the moment
    def test_last_successful_fill(self):
        # type: () -> None
        self.assertIsNone(last_successful_fill('non-existant'))
        a_time = datetime(2016, 3, 14, 19).replace(tzinfo=utc)
        one_hour_before = datetime(2016, 3, 14, 18).replace(tzinfo=utc)
        fill = FillState.objects.create(property='property', end_time=a_time,
                                        state=FillState.DONE)
        # A DONE fill reports its own end_time ...
        self.assertEqual(last_successful_fill('property'), a_time)
        # ... while a STARTED fill reports the hour before.
        fill.state = FillState.STARTED
        fill.save()
        self.assertEqual(last_successful_fill('property'), one_hour_before)

    def test_sort_by_totals(self):
        # type: () -> None
        empty = []  # type: List[int]
        value_arrays = {'c': [0, 1], 'a': [9], 'b': [1, 1, 1], 'd': empty}
        # Keys come back ordered by descending series total.
        self.assertEqual(sort_by_totals(value_arrays), ['a', 'b', 'c', 'd'])

    def test_sort_client_labels(self):
        # type: () -> None
        data = {'realm': {'a': [16], 'c': [15], 'b': [14], 'e': [13], 'd': [12], 'h': [11]},
                'user': {'a': [6], 'b': [5], 'd': [4], 'e': [3], 'f': [2], 'g': [1]}}
        self.assertEqual(sort_client_labels(data),
                         ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'])
class TestTimeRange(ZulipTestCase):
    def test_time_range(self):
        # type: () -> None
        hour = timedelta(hours=1)
        day = timedelta(days=1)
        tz = get_fixed_timezone(-100)  # 100 minutes west of UTC
        # 22:59 is chosen so that converting to UTC and applying
        # floor_to_{hour,day} do not commute.
        a_time = datetime(2016, 3, 14, 22, 59).replace(tzinfo=tz)
        floor_hour = datetime(2016, 3, 14, 22).replace(tzinfo=tz)
        floor_day = datetime(2016, 3, 14).replace(tzinfo=tz)

        # start == end, off a boundary: empty range.
        self.assertEqual(time_range(a_time, a_time, CountStat.HOUR, None), [])
        self.assertEqual(time_range(a_time, a_time, CountStat.DAY, None), [])
        # start == end == boundary with min_length == 0: single point.
        self.assertEqual(time_range(floor_hour, floor_hour, CountStat.HOUR, 0), [floor_hour])
        self.assertEqual(time_range(floor_day, floor_day, CountStat.DAY, 0), [floor_day])
        # start and end on different boundaries.
        self.assertEqual(time_range(floor_hour, floor_hour + hour, CountStat.HOUR, None),
                         [floor_hour, floor_hour + hour])
        self.assertEqual(time_range(floor_day, floor_day + day, CountStat.DAY, None),
                         [floor_day, floor_day + day])
        # min_length pads the range backwards from the start.
        self.assertEqual(time_range(floor_hour, floor_hour + hour, CountStat.HOUR, 4),
                         [floor_hour - 2*hour, floor_hour - hour, floor_hour, floor_hour + hour])
        self.assertEqual(time_range(floor_day, floor_day + day, CountStat.DAY, 4),
                         [floor_day - 2*day, floor_day - day, floor_day, floor_day + day])
class TestMapArrays(ZulipTestCase):
    def test_map_arrays(self):
        # type: () -> None
        raw = {
            'desktop app 1.0': [1, 2, 3],
            'desktop app 2.0': [10, 12, 13],
            'desktop app 3.0': [21, 22, 23],
            'website': [1, 2, 3],
            'ZulipiOS': [1, 2, 3],
            'ZulipMobile': [1, 5, 7],
            'ZulipPython': [1, 2, 3],
            'API: Python': [1, 2, 3],
            'SomethingRandom': [4, 5, 6],
            'ZulipGitHubWebhook': [7, 7, 9],
            'ZulipAndroid': [64, 63, 65],
        }
        expected = {
            # All 'desktop app *' series merge into one summed series.
            'Old desktop app': [32, 36, 39],
            'Old iOS app': [1, 2, 3],
            'New iOS app': [1, 5, 7],
            'Website': [1, 2, 3],
            # 'ZulipPython' and 'API: Python' merge into one summed series.
            'Python API': [2, 4, 6],
            # Unrecognized client names pass through untouched.
            'SomethingRandom': [4, 5, 6],
            'GitHub webhook': [7, 7, 9],
            'Android app': [64, 63, 65],
        }
        self.assertEqual(rewrite_client_arrays(raw), expected)
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Abishek Subramanian, Cisco Systems, Inc.
# @author: Sergey Sudakovich, Cisco Systems, Inc.
import logging
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
LOG = logging.getLogger(__name__)
def get_tenant_choices(request):
    """Build the choices list for a project dropdown.

    Returns a list of (tenant_id, tenant_name) tuples for all enabled
    tenants, preceded by a blank 'Select a tenant' placeholder entry.
    Keystone errors are reported via the horizon exception handler and
    yield just the placeholder.
    """
    choices = [('', _("Select a tenant"))]
    try:
        tenants, has_more = api.keystone.tenant_list(request)
    except Exception:
        tenants = []
        exceptions.handle(request, _('Projects could not be retrieved.'))
    choices.extend((tenant.id, tenant.name)
                   for tenant in tenants if tenant.enabled)
    return choices
class CreateNetworkProfile(forms.SelfHandlingForm):
    """Create Network Profile form.

    The 'switchable'/'switched' widget classes and the data-* attributes
    drive horizon's client-side show/hide behavior: fields tagged with
    'data-switch-on' appear only when the named controlling field has a
    matching value.
    """

    name = forms.CharField(max_length=255,
                           label=_("Name"),
                           required=True)
    # Controlling field: the chosen segment type toggles the fields below.
    segment_type = forms.ChoiceField(label=_('Segment Type'),
                                     choices=[('vlan', _('VLAN')),
                                              ('overlay', _('Overlay')),
                                              ('trunk', _('Trunk'))],
                                     widget=forms.Select
                                     (attrs={'class': 'switchable',
                                             'data-slug': 'segtype'}))
    # Sub type options available for Overlay segment type
    sub_type = forms.ChoiceField(label=_('Sub Type'),
                                 choices=[('native_vxlan', _('Native VXLAN')),
                                          ('enhanced', _('Enhanced VXLAN')),
                                          ('other', _('Other'))],
                                 required=False,
                                 widget=forms.Select
                                 (attrs={'class': 'switchable switched',
                                         'data-slug': 'subtype',
                                         'data-switch-on': 'segtype',
                                         'data-segtype-overlay':
                                         _("Sub Type")}))
    # Sub type options available for Trunk segment type
    sub_type_trunk = forms.ChoiceField(label=_('Sub Type'),
                                       choices=[('vlan', _('VLAN'))],
                                       required=False,
                                       widget=forms.Select
                                       (attrs={'class': 'switched',
                                               'data-switch-on': 'segtype',
                                               'data-segtype-trunk': _("Sub Type")}))
    # Shown for both VLAN and Overlay segment types.
    segment_range = forms.CharField(max_length=255,
                                    label=_("Segment Range"),
                                    required=False,
                                    widget=forms.TextInput
                                    (attrs={'class': 'switched',
                                            'data-switch-on': 'segtype',
                                            'data-segtype-vlan':
                                            _("Segment Range"),
                                            'data-segtype-overlay':
                                            _("Segment Range")}),
                                    help_text=_("1-4093 for VLAN; "
                                                "5000-10000 for Overlay"))
    # Only meaningful for the native VXLAN sub type.
    multicast_ip_range = forms.CharField(max_length=30,
                                         label=_("Multicast IP Range"),
                                         required=False,
                                         widget=forms.TextInput
                                         (attrs={'class': 'switched',
                                                 'data-switch-on':
                                                 'subtype',
                                                 'data-subtype-native_vxlan':
                                                 _("Multicast IP Range")}),
                                         help_text=_("Multicast IPv4 range"
                                                     "(e.g. 224.0.0.0-"
                                                     "224.0.0.100)"))
    # Free-form sub type value, used when sub_type == 'other'.
    other_subtype = forms.CharField(max_length=255,
                                    label=_("Sub Type Value (Manual Input)"),
                                    required=False,
                                    widget=forms.TextInput
                                    (attrs={'class': 'switched',
                                            'data-switch-on':
                                            'subtype',
                                            'data-subtype-other':
                                            _("Sub Type Value "
                                              "(Manual Input)")}),
                                    help_text=_("Enter parameter (e.g. GRE)"))
    # Only meaningful for VLAN segment types.
    physical_network = forms.CharField(max_length=255,
                                       label=_("Physical Network"),
                                       required=False,
                                       widget=forms.TextInput
                                       (attrs={'class': 'switched',
                                               'data-switch-on': 'segtype',
                                               'data-segtype-vlan':
                                               _("Physical Network")}))
    project = forms.ChoiceField(label=_("Project"),
                                required=False)

    def __init__(self, request, *args, **kwargs):
        """Populate the project dropdown with tenants visible to this user."""
        super(CreateNetworkProfile, self).__init__(request, *args, **kwargs)
        self.fields['project'].choices = get_tenant_choices(request)

    def clean(self):
        """Normalize ``sub_type`` according to the chosen segment type.

        For overlay segments with sub_type 'other', the manually-entered
        ``other_subtype`` value replaces 'other'; for trunk segments the
        trunk-specific choice is copied into ``sub_type``.
        """
        # If sub_type is 'other' or 'trunk' then
        # assign this new value for sub_type
        cleaned_data = super(CreateNetworkProfile, self).clean()
        segment_type = cleaned_data.get('segment_type')
        if segment_type == 'overlay':
            sub_type = cleaned_data.get('sub_type')
            if sub_type == 'other':
                other_subtype = cleaned_data.get('other_subtype')
                cleaned_data['sub_type'] = other_subtype
                LOG.debug('subtype is now %(params)s',
                          {'params': other_subtype})
        elif segment_type == 'trunk':
            sub_type_trunk = cleaned_data.get('sub_type_trunk')
            cleaned_data['sub_type'] = sub_type_trunk
            LOG.debug('subtype is now %(params)s',
                      {'params': sub_type_trunk})
        return cleaned_data

    def handle(self, request, data):
        """Create the network profile via Neutron.

        Returns the created profile on success; on failure reports the
        error and redirects to the nexus1000v index page.
        """
        try:
            LOG.debug('request = %(req)s, params = %(params)s',
                      {'req': request, 'params': data})
            params = {'name': data['name'],
                      'segment_type': data['segment_type'],
                      'sub_type': data['sub_type'],
                      'segment_range': data['segment_range'],
                      'physical_network': data['physical_network'],
                      'multicast_ip_range': data['multicast_ip_range'],
                      'tenant_id': data['project']}
            profile = api.neutron.profile_create(request,
                                                 **params)
            msg = _('Network Profile %s '
                    'was successfully created.') % data['name']
            LOG.debug(msg)
            messages.success(request, msg)
            return profile
        except Exception:
            redirect = reverse('horizon:router:nexus1000v:index')
            msg = _('Failed to create network profile %s') % data['name']
            LOG.error(msg)
            exceptions.handle(request, msg, redirect=redirect)
class UpdateNetworkProfile(forms.SelfHandlingForm):
    """Update Network Profile form."""

    profile_id = forms.CharField(label=_("ID"),
                                 widget=forms.HiddenInput())
    name = forms.CharField(max_length=255,
                           label=_("Name"), required=True)
    segment_type = forms.ChoiceField(label=_('Segment Type'),
                                     choices=[('vlan', 'VLAN'),
                                              ('vxlan', 'VXLAN')],
                                     widget=forms.Select
                                     (attrs={'class': 'switchable'}))
    segment_range = forms.CharField(max_length=255,
                                    label=_("Segment Range"),
                                    required=True)
    physical_network = forms.CharField(max_length=255,
                                       label=_("Physical Network"),
                                       required=False)
    project = forms.CharField(label=_("Project"), required=False)

    def handle(self, request, data):
        """Update the network profile via Neutron.

        Returns the updated profile on success; on failure reports the
        error and redirects to the nexus1000v index page.
        """
        try:
            LOG.debug('request = %(req)s, params = %(params)s',
                      {'req': request, 'params': data})
            profile = api.neutron.profile_update(
                request,
                data['profile_id'],
                name=data['name'],
                segment_type=data['segment_type'],
                segment_range=data['segment_range'],
                physical_network=data['physical_network'])
            msg = _('Network Profile %s '
                    'was successfully updated.') % data['profile_id']
            LOG.debug(msg)
            messages.success(request, msg)
            return profile
        except Exception:
            # Bug fix: 'msg' was previously assigned only on the success
            # path, so reaching this handler raised NameError instead of
            # showing the intended error to the user. Build the error
            # message here (mirroring CreateNetworkProfile.handle).
            msg = _('Failed to update network profile (%s).') % data['profile_id']
            LOG.error(msg)
            redirect = reverse('horizon:router:nexus1000v:index')
            exceptions.handle(request, msg, redirect=redirect)
| |
'''tzinfo timezone information for W_minus_SU.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class W_minus_SU(DstTzInfo):
    '''W_minus_SU timezone definition. See datetime.tzinfo for details'''

    # NOTE: machine-generated pytz zone data; do not edit entries by hand.
    zone = 'W_minus_SU'

    # UTC instants at which the zone's offset/abbreviation changes;
    # index-aligned with _transition_info below.
    _utc_transition_times = [
        d(1,1,1,0,0,0),
        d(1916,7,2,21,30,0),
        d(1917,7,1,20,29,12),
        d(1917,12,27,20,29,12),
        d(1918,5,31,19,29,12),
        d(1918,9,15,20,29,12),
        d(1919,5,31,19,29,12),
        d(1919,6,30,21,29,12),
        d(1919,8,15,20,0,0),
        d(1921,2,14,20,0,0),
        d(1921,3,20,19,0,0),
        d(1921,8,31,19,0,0),
        d(1921,9,30,20,0,0),
        d(1922,9,30,21,0,0),
        d(1930,6,20,22,0,0),
        d(1981,3,31,21,0,0),
        d(1981,9,30,20,0,0),
        d(1982,3,31,21,0,0),
        d(1982,9,30,20,0,0),
        d(1983,3,31,21,0,0),
        d(1983,9,30,20,0,0),
        d(1984,3,31,21,0,0),
        d(1984,9,29,23,0,0),
        d(1985,3,30,23,0,0),
        d(1985,9,28,23,0,0),
        d(1986,3,29,23,0,0),
        d(1986,9,27,23,0,0),
        d(1987,3,28,23,0,0),
        d(1987,9,26,23,0,0),
        d(1988,3,26,23,0,0),
        d(1988,9,24,23,0,0),
        d(1989,3,25,23,0,0),
        d(1989,9,23,23,0,0),
        d(1990,3,24,23,0,0),
        d(1990,9,29,23,0,0),
        d(1991,3,30,23,0,0),
        d(1991,9,29,0,0,0),
        d(1992,1,19,0,0,0),
        d(1992,3,28,20,0,0),
        d(1992,9,26,19,0,0),
        d(1993,3,27,23,0,0),
        d(1993,9,25,23,0,0),
        d(1994,3,26,23,0,0),
        d(1994,9,24,23,0,0),
        d(1995,3,25,23,0,0),
        d(1995,9,23,23,0,0),
        d(1996,3,30,23,0,0),
        d(1996,10,26,23,0,0),
        d(1997,3,29,23,0,0),
        d(1997,10,25,23,0,0),
        d(1998,3,28,23,0,0),
        d(1998,10,24,23,0,0),
        d(1999,3,27,23,0,0),
        d(1999,10,30,23,0,0),
        d(2000,3,25,23,0,0),
        d(2000,10,28,23,0,0),
        d(2001,3,24,23,0,0),
        d(2001,10,27,23,0,0),
        d(2002,3,30,23,0,0),
        d(2002,10,26,23,0,0),
        d(2003,3,29,23,0,0),
        d(2003,10,25,23,0,0),
        d(2004,3,27,23,0,0),
        d(2004,10,30,23,0,0),
        d(2005,3,26,23,0,0),
        d(2005,10,29,23,0,0),
        d(2006,3,25,23,0,0),
        d(2006,10,28,23,0,0),
        d(2007,3,24,23,0,0),
        d(2007,10,27,23,0,0),
        d(2008,3,29,23,0,0),
        d(2008,10,25,23,0,0),
        d(2009,3,28,23,0,0),
        d(2009,10,24,23,0,0),
        d(2010,3,27,23,0,0),
        d(2010,10,30,23,0,0),
        d(2011,3,26,23,0,0),
        d(2011,10,29,23,0,0),
        d(2012,3,24,23,0,0),
        d(2012,10,27,23,0,0),
        d(2013,3,30,23,0,0),
        d(2013,10,26,23,0,0),
        d(2014,3,29,23,0,0),
        d(2014,10,25,23,0,0),
        d(2015,3,28,23,0,0),
        d(2015,10,24,23,0,0),
        d(2016,3,26,23,0,0),
        d(2016,10,29,23,0,0),
        d(2017,3,25,23,0,0),
        d(2017,10,28,23,0,0),
        d(2018,3,24,23,0,0),
        d(2018,10,27,23,0,0),
        d(2019,3,30,23,0,0),
        d(2019,10,26,23,0,0),
        d(2020,3,28,23,0,0),
        d(2020,10,24,23,0,0),
        d(2021,3,27,23,0,0),
        d(2021,10,30,23,0,0),
        d(2022,3,26,23,0,0),
        d(2022,10,29,23,0,0),
        d(2023,3,25,23,0,0),
        d(2023,10,28,23,0,0),
        d(2024,3,30,23,0,0),
        d(2024,10,26,23,0,0),
        d(2025,3,29,23,0,0),
        d(2025,10,25,23,0,0),
        d(2026,3,28,23,0,0),
        d(2026,10,24,23,0,0),
        d(2027,3,27,23,0,0),
        d(2027,10,30,23,0,0),
        d(2028,3,25,23,0,0),
        d(2028,10,28,23,0,0),
        d(2029,3,24,23,0,0),
        d(2029,10,27,23,0,0),
        d(2030,3,30,23,0,0),
        d(2030,10,26,23,0,0),
        d(2031,3,29,23,0,0),
        d(2031,10,25,23,0,0),
        d(2032,3,27,23,0,0),
        d(2032,10,30,23,0,0),
        d(2033,3,26,23,0,0),
        d(2033,10,29,23,0,0),
        d(2034,3,25,23,0,0),
        d(2034,10,28,23,0,0),
        d(2035,3,24,23,0,0),
        d(2035,10,27,23,0,0),
        d(2036,3,29,23,0,0),
        d(2036,10,25,23,0,0),
        d(2037,3,28,23,0,0),
        d(2037,10,24,23,0,0),
        ]

    # (utcoffset seconds, dst seconds, tzname) for each transition above.
    _transition_info = [
        i(9000,0,'MMT'),
        i(9060,0,'MMT'),
        i(12660,3600,'MST'),
        i(9060,0,'MMT'),
        i(16260,7200,'MDST'),
        i(12660,3600,'MST'),
        i(16260,7200,'MDST'),
        i(14400,5340,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(18000,7200,'MSD'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(7200,0,'EET'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(10800,0,'EEST'),
        i(7200,0,'EET'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        i(14400,3600,'MSD'),
        i(10800,0,'MSK'),
        ]
# Replace the class with its singleton instance, per pytz convention.
W_minus_SU = W_minus_SU()
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from struct import pack, unpack
from thrift.Thrift import TException
from ..compat import BufferIO
class TTransportException(TException):
    """Transport-level error; ``type`` holds one of the status constants."""

    UNKNOWN = 0
    NOT_OPEN = 1
    ALREADY_OPEN = 2
    TIMED_OUT = 3
    END_OF_FILE = 4
    NEGATIVE_SIZE = 5
    SIZE_LIMIT = 6
    INVALID_CLIENT_TYPE = 7

    def __init__(self, type=UNKNOWN, message=None):
        # Let TException handle the message, then record the failure kind.
        super(TTransportException, self).__init__(message)
        self.type = type
class TTransportBase(object):
    """Base class for Thrift transport layer."""

    def isOpen(self):
        pass

    def open(self):
        pass

    def close(self):
        pass

    def read(self, sz):
        pass

    def readAll(self, sz):
        """Read exactly ``sz`` bytes, raising EOFError on premature EOF."""
        parts = []
        remaining = sz
        while remaining > 0:
            chunk = self.read(remaining)
            if len(chunk) == 0:
                raise EOFError()
            parts.append(chunk)
            remaining -= len(chunk)
        return b''.join(parts)

    def write(self, buf):
        pass

    def flush(self):
        pass
# This class is best thought of as an interface.
class CReadableTransport(object):
    """Base class for transports that are readable from the C extension."""

    # TODO(dreiss): Consider allowing a pure-Python StringIO here instead,
    # which would also permit writing after reading.
    # NOTE: This is a classic class, so properties will NOT work correctly
    # for setting.
    @property
    def cstringio_buf(self):
        """The cStringIO buffer holding the chunk currently being read."""
        pass

    def cstringio_refill(self, partialread, reqlen):
        """Refill and return the read buffer.

        ``partialread`` is what the C code already consumed from the old
        buffer and must be placed at the front of the new one.  The return
        value must be a new (not borrowed) reference; something along the
        lines of ``self._buf`` is fine.  Raises EOFError if ``reqlen``
        bytes cannot be provided.
        """
        pass
class TServerTransportBase(object):
    """Base class for Thrift server transports."""

    def listen(self):
        """Start listening for client connections (no-op in the base)."""
        pass

    def accept(self):
        """Accept one client connection (no-op in the base)."""
        pass

    def close(self):
        """Shut the server transport down (no-op in the base)."""
        pass
class TTransportFactoryBase(object):
    """Base class for a Transport Factory"""

    def getTransport(self, trans):
        """Return *trans* unchanged; subclasses may wrap it instead."""
        return trans
class TBufferedTransportFactory(object):
    """Factory transport that builds buffered transports"""

    def getTransport(self, trans):
        """Wrap *trans* in a TBufferedTransport and return it."""
        return TBufferedTransport(trans)
class TBufferedTransport(TTransportBase, CReadableTransport):
    """Transport decorator that buffers its I/O.

    Reads are served from a (configurable) fixed-size buffer refilled from
    the wrapped transport on demand; writes accumulate locally until a
    flush() pushes them downstream.
    """

    DEFAULT_BUFFER = 4096

    def __init__(self, trans, rbuf_size=DEFAULT_BUFFER):
        self.__trans = trans
        self.__wbuf = BufferIO()
        # Seed the read buffer empty so the first read() triggers a refill.
        self.__rbuf = BufferIO(b'')
        self.__rbuf_size = rbuf_size

    def isOpen(self):
        return self.__trans.isOpen()

    def open(self):
        return self.__trans.open()

    def close(self):
        return self.__trans.close()

    def read(self, sz):
        data = self.__rbuf.read(sz)
        if data:
            return data
        # Buffer exhausted: fetch at least sz (up to rbuf_size) more bytes.
        self.__rbuf = BufferIO(self.__trans.read(max(sz, self.__rbuf_size)))
        return self.__rbuf.read(sz)

    def write(self, buf):
        try:
            self.__wbuf.write(buf)
        except Exception:
            # Reset so the buffer never holds a partial function call.
            self.__wbuf = BufferIO()
            raise

    def flush(self):
        pending = self.__wbuf.getvalue()
        # Reset before writing so a downstream failure leaves clean state.
        self.__wbuf = BufferIO()
        self.__trans.write(pending)
        self.__trans.flush()

    # CReadableTransport implementation.
    @property
    def cstringio_buf(self):
        return self.__rbuf

    def cstringio_refill(self, partialread, reqlen):
        data = partialread
        if reqlen < self.__rbuf_size:
            # Opportunistically grab a whole buffer's worth.
            data += self.__trans.read(self.__rbuf_size)
        if len(data) < reqlen:
            # Still short: block until reqlen bytes are available.
            data += self.__trans.readAll(reqlen - len(data))
        self.__rbuf = BufferIO(data)
        return self.__rbuf
class TMemoryBuffer(TTransportBase, CReadableTransport):
    """Wraps an in-memory buffer as a TTransport.

    NOTE: Unlike the C++ version of this class, you cannot write to it and
    then immediately read from it; to read, pass a value to the
    constructor.

    TODO(dreiss): Make this work like the C++ version.
    """

    def __init__(self, value=None, offset=0):
        """value -- initial contents to read from.

        With ``value`` set this is a transport for reading, otherwise one
        for writing."""
        self._buffer = BufferIO(value) if value is not None else BufferIO()
        if offset:
            self._buffer.seek(offset)

    def isOpen(self):
        return not self._buffer.closed

    def open(self):
        pass

    def close(self):
        self._buffer.close()

    def read(self, sz):
        return self._buffer.read(sz)

    def write(self, buf):
        self._buffer.write(buf)

    def flush(self):
        pass

    def getvalue(self):
        return self._buffer.getvalue()

    # CReadableTransport implementation.
    @property
    def cstringio_buf(self):
        return self._buffer

    def cstringio_refill(self, partialread, reqlen):
        # A memory buffer has only one shot at reading.
        raise EOFError()
class TFramedTransportFactory(object):
    """Factory transport that builds framed transports"""

    def getTransport(self, trans):
        """Wrap *trans* in a TFramedTransport and return it."""
        return TFramedTransport(trans)
class TFramedTransport(TTransportBase, CReadableTransport):
    """Class that wraps another transport and frames its I/O when writing."""

    def __init__(self, trans,):
        self.__trans = trans
        self.__rbuf = BufferIO(b'')
        self.__wbuf = BufferIO()

    def isOpen(self):
        return self.__trans.isOpen()

    def open(self):
        return self.__trans.open()

    def close(self):
        return self.__trans.close()

    def read(self, sz):
        data = self.__rbuf.read(sz)
        if data:
            return data
        # Current frame is exhausted; pull in the next one.
        self.readFrame()
        return self.__rbuf.read(sz)

    def readFrame(self):
        """Read one length-prefixed frame into the read buffer."""
        header = self.__trans.readAll(4)
        frame_len, = unpack('!i', header)
        self.__rbuf = BufferIO(self.__trans.readAll(frame_len))

    def write(self, buf):
        self.__wbuf.write(buf)

    def flush(self):
        payload = self.__wbuf.getvalue()
        # Reset first so a failed write leaves no stale partial frame.
        self.__wbuf = BufferIO()
        # N.B.: concatenating the 4-byte length prefix and issuing a single
        # write is WAY cheaper than two separate calls to the underlying
        # socket object -- Python socket writes are expensive, while string
        # buffer operations are well-managed without excessive copies.
        self.__trans.write(pack("!i", len(payload)) + payload)
        self.__trans.flush()

    # CReadableTransport implementation.
    @property
    def cstringio_buf(self):
        return self.__rbuf

    def cstringio_refill(self, prefix, reqlen):
        # The read buffer is already empty when fastbinary asks for a
        # refill, so we can keep appending whole frames until satisfied.
        while len(prefix) < reqlen:
            self.readFrame()
            prefix += self.__rbuf.getvalue()
        self.__rbuf = BufferIO(prefix)
        return self.__rbuf
class TFileObjectTransport(TTransportBase):
    """Wraps a file-like object to make it work as a Thrift transport."""

    def __init__(self, fileobj):
        self.fileobj = fileobj

    def isOpen(self):
        # The wrapped object was supplied already open; report it as such.
        return True

    def close(self):
        """Close the underlying file object."""
        self.fileobj.close()

    def read(self, sz):
        """Delegate reads to the underlying file object."""
        return self.fileobj.read(sz)

    def write(self, buf):
        """Delegate writes to the underlying file object."""
        self.fileobj.write(buf)

    def flush(self):
        """Delegate flushing to the underlying file object."""
        self.fileobj.flush()
class TSaslClientTransport(TTransportBase, CReadableTransport):
    """
    SASL transport
    """

    # Negotiation status codes (a single byte on the wire).
    START = 1
    OK = 2
    BAD = 3
    ERROR = 4
    COMPLETE = 5

    def __init__(self, transport, host, service, mechanism='GSSAPI',
                 **sasl_kwargs):
        """
        transport: an underlying transport to use, typically just a TSocket
        host: the name of the server, from a SASL perspective
        service: the name of the server's service, from a SASL perspective
        mechanism: the name of the preferred mechanism to use

        All other kwargs will be passed to the puresasl.client.SASLClient
        constructor.
        """
        # Imported lazily so the module loads without puresasl installed.
        from puresasl.client import SASLClient

        self.transport = transport
        self.sasl = SASLClient(host, service, mechanism, **sasl_kwargs)

        self.__wbuf = BufferIO()
        self.__rbuf = BufferIO(b'')

    def open(self):
        """Open the wrapped transport and run the SASL handshake."""
        if not self.transport.isOpen():
            self.transport.open()

        self.send_sasl_msg(self.START, self.sasl.mechanism)
        self.send_sasl_msg(self.OK, self.sasl.process())

        # Exchange challenges until the server declares completion.
        while True:
            status, challenge = self.recv_sasl_msg()
            if status == self.OK:
                self.send_sasl_msg(self.OK, self.sasl.process(challenge))
            elif status == self.COMPLETE:
                if not self.sasl.complete:
                    # Server claims done but the client-side state disagrees.
                    raise TTransportException(
                        TTransportException.NOT_OPEN,
                        "The server erroneously indicated "
                        "that SASL negotiation was complete")
                else:
                    break
            else:
                raise TTransportException(
                    TTransportException.NOT_OPEN,
                    "Bad SASL negotiation status: %d (%s)"
                    % (status, challenge))

    def send_sasl_msg(self, status, body):
        # Wire format: 1-byte status + 4-byte big-endian body length + body.
        header = pack(">BI", status, len(body))
        self.transport.write(header + body)
        self.transport.flush()

    def recv_sasl_msg(self):
        header = self.transport.readAll(5)
        status, length = unpack(">BI", header)
        if length > 0:
            payload = self.transport.readAll(length)
        else:
            # NOTE(review): this branch returns a text string while the
            # branch above returns bytes; on Python 3, b"" is presumably
            # intended -- confirm before changing.
            payload = ""
        return status, payload

    def write(self, data):
        self.__wbuf.write(data)

    def flush(self):
        # SASL-wrap the buffered payload and send it as one length-prefixed
        # frame.
        data = self.__wbuf.getvalue()
        encoded = self.sasl.wrap(data)
        self.transport.write(''.join((pack("!i", len(encoded)), encoded)))
        self.transport.flush()
        self.__wbuf = BufferIO()

    def read(self, sz):
        ret = self.__rbuf.read(sz)
        if len(ret) != 0:
            return ret

        self._read_frame()
        return self.__rbuf.read(sz)

    def _read_frame(self):
        # Read one length-prefixed frame and SASL-unwrap it into the read
        # buffer.
        header = self.transport.readAll(4)
        length, = unpack('!i', header)
        encoded = self.transport.readAll(length)
        self.__rbuf = BufferIO(self.sasl.unwrap(encoded))

    def close(self):
        # Dispose of SASL state before closing the wrapped transport.
        self.sasl.dispose()
        self.transport.close()

    # based on TFramedTransport
    @property
    def cstringio_buf(self):
        return self.__rbuf

    def cstringio_refill(self, prefix, reqlen):
        # self.__rbuf will already be empty here because fastbinary doesn't
        # ask for a refill until the previous buffer is empty.  Therefore,
        # we can start reading new frames immediately.
        while len(prefix) < reqlen:
            self._read_frame()
            prefix += self.__rbuf.getvalue()
        self.__rbuf = BufferIO(prefix)
        return self.__rbuf
| |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import pytest
import numpy as np
from ...tests.helper import quantity_allclose
from ... import units as u
from ... import constants
from ...time import Time
from ..builtin_frames import ICRS, AltAz, LSR, GCRS, Galactic, FK5
from ..baseframe import frame_transform_graph
from ..sites import get_builtin_sites
from .. import (TimeAttribute,
FunctionTransformWithFiniteDifference, get_sun,
CartesianRepresentation, SphericalRepresentation,
CartesianDifferential, SphericalDifferential,
DynamicMatrixTransform)
# Reference epoch shared by the faux-LSR frames below.
J2000 = Time('J2000')


@pytest.mark.parametrize("dt, symmetric", [(1*u.second, True),
                                           (1*u.year, True),
                                           (1*u.second, False),
                                           (1*u.year, False)])
def test_faux_lsr(dt, symmetric):
    """Check finite-difference velocity transforms via a faux LSR frame."""
    class LSR2(LSR):
        obstime = TimeAttribute(default=J2000)

    @frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                     ICRS, LSR2, finite_difference_dt=dt,
                                     symmetric_finite_difference=symmetric)
    def icrs_to_lsr(icrs_coo, lsr_frame):
        dt = lsr_frame.obstime - J2000
        offset = lsr_frame.v_bary * dt.to(u.second)
        return lsr_frame.realize_frame(icrs_coo.data.without_differentials() + offset)

    @frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                     LSR2, ICRS, finite_difference_dt=dt,
                                     symmetric_finite_difference=symmetric)
    def lsr_to_icrs(lsr_coo, icrs_frame):
        # Bug fix: this body previously referenced ``lsr_frame``, which is
        # only a parameter of ``icrs_to_lsr`` above and is not in scope
        # here, so the reverse transform raised NameError when invoked.
        # The source coordinate ``lsr_coo`` carries the needed obstime and
        # v_bary.
        dt = lsr_coo.obstime - J2000
        offset = lsr_coo.v_bary * dt.to(u.second)
        return icrs_frame.realize_frame(lsr_coo.data - offset)

    ic = ICRS(ra=12.3*u.deg, dec=45.6*u.deg, distance=7.8*u.au,
              pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=0*u.marcsec/u.yr,
              radial_velocity=0*u.km/u.s)
    lsrc = ic.transform_to(LSR2())

    # The barycenter offset is pure velocity, so positions should match.
    assert quantity_allclose(ic.cartesian.xyz, lsrc.cartesian.xyz)

    idiff = ic.cartesian.differentials['s']
    ldiff = lsrc.cartesian.differentials['s']
    change = (ldiff.d_xyz - idiff.d_xyz).to(u.km/u.s)
    totchange = np.sum(change**2)**0.5
    # The total velocity change should equal the barycentric speed.
    assert quantity_allclose(totchange, np.sum(lsrc.v_bary.d_xyz**2)**0.5)

    ic2 = ICRS(ra=120.3*u.deg, dec=45.6*u.deg, distance=7.8*u.au,
               pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=10*u.marcsec/u.yr,
               radial_velocity=1000*u.km/u.s)
    lsrc2 = ic2.transform_to(LSR2())
    tot = np.sum(lsrc2.cartesian.differentials['s'].d_xyz**2)**0.5
    assert np.abs(tot.to('km/s') - 1000*u.km/u.s) < 20*u.km/u.s
def test_faux_fk5_galactic():
    """Finite-difference transforms should agree with matrix transforms."""
    from ..builtin_frames.galactic_transforms import fk5_to_gal, _gal_to_fk5

    class Galactic2(Galactic):
        pass

    dt = 1000*u.s

    @frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                     FK5, Galactic2, finite_difference_dt=dt,
                                     symmetric_finite_difference=True,
                                     finite_difference_frameattr_name=None)
    def fk5_to_gal2(fk5_coo, gal_frame):
        trans = DynamicMatrixTransform(fk5_to_gal, FK5, Galactic2)
        return trans(fk5_coo, gal_frame)

    # Bug fix: this reverse transform was registered as Galactic2 -> ICRS,
    # although the function converts to FK5 (it applies the Galactic->FK5
    # matrix and its second parameter is an FK5 frame). Register it as
    # Galactic2 -> FK5 to match.
    @frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                     Galactic2, FK5, finite_difference_dt=dt,
                                     symmetric_finite_difference=True,
                                     finite_difference_frameattr_name=None)
    def gal2_to_fk5(gal_coo, fk5_frame):
        trans = DynamicMatrixTransform(_gal_to_fk5, Galactic2, FK5)
        return trans(gal_coo, fk5_frame)

    c1 = FK5(ra=150*u.deg, dec=-17*u.deg, radial_velocity=83*u.km/u.s,
             pm_ra_cosdec=-41*u.mas/u.yr, pm_dec=16*u.mas/u.yr,
             distance=150*u.pc)
    c2 = c1.transform_to(Galactic2)
    c3 = c1.transform_to(Galactic)

    # compare the matrix and finite-difference calculations
    assert quantity_allclose(c2.pm_l_cosb, c3.pm_l_cosb, rtol=1e-4)
    assert quantity_allclose(c2.pm_b, c3.pm_b, rtol=1e-4)
def test_gcrs_diffs():
    """ICRS->GCRS should add Earth's orbital velocity along the right axis."""
    obstime = Time('J2017')
    gcrs_frame = GCRS(obstime=obstime)
    # The Sun should show very little heliocentric radial velocity, while a
    # direction a quarter-year away along the ecliptic should show roughly
    # the full orbital speed of the Earth (parallel vs. perpendicular to
    # Earth's orbital motion).
    sun_now = get_sun(obstime)
    sun_qtr = get_sun(obstime - .25*u.year)
    # anti-Sun direction, expressed as lon/lat for building an ICRS coord
    anti_sun = CartesianRepresentation(-sun_now.cartesian.xyz).represent_as(
        SphericalRepresentation)
    icrs_par = ICRS(ra=anti_sun.lon, dec=anti_sun.lat, distance=100*u.au,
                    pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=0*u.marcsec/u.yr,
                    radial_velocity=0*u.km/u.s)
    icrs_perp = ICRS(ra=sun_qtr.ra, dec=sun_qtr.dec, distance=100*u.au,
                     pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=0*u.marcsec/u.yr,
                     radial_velocity=0*u.km/u.s)
    # Transforming to GCRS should fold in (or not) the Earth's velocity
    # vector depending on the direction relative to the orbit.
    gcrs_par = icrs_par.transform_to(gcrs_frame)
    gcrs_perp = icrs_perp.transform_to(gcrs_frame)
    # high along the ecliptic-but-not-sunward axis, low along the sun axis
    assert np.abs(gcrs_perp.radial_velocity) > 30*u.km/u.s
    assert np.abs(gcrs_perp.radial_velocity) < 40*u.km/u.s
    assert np.abs(gcrs_par.radial_velocity) < 1*u.km/u.s
    # round-tripping back to ICRS should recover (nearly) zero velocity
    for gcrs_coo in (gcrs_par, gcrs_perp):
        roundtrip = gcrs_coo.transform_to(ICRS)
        assert np.all(np.abs(roundtrip.data.differentials['s'].d_xyz) <
                      3e-5*u.km/u.s)
def test_altaz_diffs():
    """Sanity-check velocities of an ICRS coordinate transformed to AltAz."""
    times = Time('J2015') + np.linspace(-1, 1, 1000)*u.day
    frame = AltAz(obstime=times, location=get_builtin_sites()['greenwich'])
    icrs_coo = ICRS(np.zeros_like(times)*u.deg, 10*u.deg, 100*u.au,
                    pm_ra_cosdec=np.zeros_like(times)*u.marcsec/u.yr,
                    pm_dec=0*u.marcsec/u.yr,
                    radial_velocity=0*u.km/u.s)
    altaz_coo = icrs_coo.transform_to(frame)
    # The change in radial velocity over ~2 days should not be much more
    # than the rotation speed of the Earth; the orbital motion shifts the RV
    # a little as well, hence the 1.2 fudge factor.  # MAGIC NUMBER
    assert (np.ptp(altaz_coo.radial_velocity) / 2 <
            (2*np.pi*constants.R_earth/u.day)*1.2)
    cart_diff = altaz_coo.data.differentials['s'].represent_as(
        CartesianDifferential, altaz_coo.data)
    # The "total" velocity exceeds c because the *tangential* part is not a
    # true velocity, but one induced by the Earth's rotation as seen at a
    # distance of 100 AU.
    assert np.all(np.sum(cart_diff.d_xyz**2, axis=0)**0.5 > constants.c)
_xfail = pytest.mark.xfail
# BUG FIX: applying ``pytest.mark.xfail`` directly to a parametrize *value*
# was deprecated in pytest 3.1 and removed in pytest 4.0 (as the original
# TODO below noted); ``pytest.param(..., marks=...)`` is the supported form.
@pytest.mark.parametrize('distance', [1000*u.au,
                                      10*u.pc,
                                      pytest.param(10*u.kpc, marks=_xfail),
                                      pytest.param(100*u.kpc, marks=_xfail)])
                                     # TODO: make the kpc cases not fail
                                     # when the finite-difference numerical
                                     # stability is improved
def test_numerical_limits(distance):
    """
    Tests the numerical stability of the default settings for the finite
    difference transformation calculation. This is *known* to fail for at
    >~1kpc, but this may be improved in future versions.
    """
    time = Time('J2017') + np.linspace(-.5, .5, 100)*u.year
    icoo = ICRS(ra=0*u.deg, dec=10*u.deg, distance=distance,
                pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=0*u.marcsec/u.yr,
                radial_velocity=0*u.km/u.s)
    gcoo = icoo.transform_to(GCRS(obstime=time))
    rv = gcoo.radial_velocity.to('km/s')
    # if its a lot bigger than this - ~the maximal velocity shift along
    # the direction above with a small allowance for noise - finite-difference
    # rounding errors have ruined the calculation
    assert np.ptp(rv) < 65*u.km/u.s
def diff_info_plot(frame, time):
    """
    Useful for plotting a frame with multiple times. *Not* used in the testing
    suite per se, but extremely useful for interactive plotting of results from
    tests in this module.
    """
    from matplotlib import pyplot as plt
    fig, ((ax_xyz, ax_tot), (ax_rad, ax_ang)) = plt.subplots(2, 2,
                                                             figsize=(20, 12))
    diffs = frame.data.differentials['s']
    d_xyz = diffs.d_xyz.to(u.km/u.s)
    # Cartesian velocity components over time
    ax_xyz.plot_date(time.plot_date, d_xyz.T, fmt='-')
    ax_xyz.legend(['x', 'y', 'z'])
    # total speed over time
    ax_tot.plot_date(time.plot_date, np.sum(d_xyz**2, axis=0)**0.5, fmt='-')
    ax_tot.set_title('total')
    sph = diffs.represent_as(SphericalDifferential, frame.data)
    # radial velocity over time
    ax_rad.plot_date(time.plot_date, sph.d_distance.to(u.km/u.s), fmt='-')
    ax_rad.set_title('radial')
    # angular motion components over time
    ax_ang.plot_date(time.plot_date, sph.d_lat.to(u.marcsec/u.yr), fmt='-',
                     label='lat')
    ax_ang.plot_date(time.plot_date, sph.d_lon.to(u.marcsec/u.yr), fmt='-',
                     label='lon')
    return fig
| |
# Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import mock
import testscenarios
from oslo.config import cfg
from oslo import messaging
from oslo.messaging.notify import dispatcher
from tests import utils as test_utils
# unittest load_tests hook: lets testscenarios expand each TestCase's
# ``scenarios`` attribute into separate test cases at collection time.
load_tests = testscenarios.load_tests_apply_scenarios
class ListenerSetupMixin(object):
    """Mixin with helpers for running a notification listener in a thread."""

    class Listener(object):
        """Endpoint wrapper that stops its listener after N messages."""

        def __init__(self, transport, targets, endpoints, expect_messages):
            self._expect_messages = expect_messages
            self._received_msgs = 0
            # Register ourselves first so we can count every delivery.
            self._listener = messaging.get_notification_listener(
                transport, targets, [self] + endpoints, allow_requeue=True)

        def info(self, ctxt, publisher_id, event_type, payload, metadata):
            self._received_msgs += 1
            if self._expect_messages == self._received_msgs:
                # Check start() does nothing with a running listener
                self._listener.start()
                self._listener.stop()
                self._listener.wait()

        def start(self):
            self._listener.start()

    def _setup_listener(self, transport, endpoints, expect_messages,
                        targets=None):
        """Start a Listener in a daemon thread and return the thread."""
        listener = self.Listener(transport,
                                 targets=targets or [
                                     messaging.Target(topic='testtopic')],
                                 expect_messages=expect_messages,
                                 endpoints=endpoints)
        thread = threading.Thread(target=listener.start)
        thread.daemon = True
        thread.start()
        return thread

    def _stop_listener(self, thread):
        """Join the listener thread; return True if it is still alive.

        A True return value therefore indicates a timeout (failure).
        """
        thread.join(timeout=5)
        # BUG FIX: Thread.isAlive() was removed in Python 3.9; is_alive()
        # is the portable spelling (available since Python 2.6).
        return thread.is_alive()

    def _setup_notifier(self, transport, topic='testtopic',
                        publisher_id='testpublisher'):
        """Create a Notifier that uses the 'messaging' driver."""
        return messaging.Notifier(transport, topic=topic,
                                  driver='messaging',
                                  publisher_id=publisher_id)
class TestNotifyListener(test_utils.BaseTestCase, ListenerSetupMixin):
    """Notification listener behaviour against the 'fake:' transport."""

    def __init__(self, *args):
        super(TestNotifyListener, self).__init__(*args)
        ListenerSetupMixin.__init__(self)

    def setUp(self):
        super(TestNotifyListener, self).setUp(conf=cfg.ConfigOpts())

    def test_constructor(self):
        """A listener exposes conf, transport, dispatcher and executor."""
        transport = messaging.get_transport(self.conf, url='fake:')
        target = messaging.Target(topic='foo')
        endpoints = [object()]
        listener = messaging.get_notification_listener(transport, [target],
                                                       endpoints)
        self.assertIs(listener.conf, self.conf)
        self.assertIs(listener.transport, transport)
        self.assertIsInstance(listener.dispatcher,
                              dispatcher.NotificationDispatcher)
        self.assertIs(listener.dispatcher.endpoints, endpoints)
        self.assertEqual('blocking', listener.executor)

    def test_no_target_topic(self):
        """Starting with a topic-less target must raise InvalidTarget."""
        transport = messaging.get_transport(self.conf, url='fake:')
        listener = messaging.get_notification_listener(transport,
                                                       [messaging.Target()],
                                                       [mock.Mock()])
        try:
            listener.start()
        except Exception as ex:
            self.assertIsInstance(ex, messaging.InvalidTarget, ex)
        else:
            # IMPROVED: self.fail() reports the intent on failure, unlike
            # the previous assertTrue(False).
            self.fail("Expected messaging.InvalidTarget to be raised")

    def test_unknown_executor(self):
        """An unknown executor name must raise ExecutorLoadFailure."""
        transport = messaging.get_transport(self.conf, url='fake:')
        try:
            messaging.get_notification_listener(transport, [], [],
                                                executor='foo')
        except Exception as ex:
            self.assertIsInstance(ex, messaging.ExecutorLoadFailure)
            self.assertEqual('foo', ex.executor)
        else:
            self.fail("Expected messaging.ExecutorLoadFailure to be raised")

    def test_one_topic(self):
        """A single-topic listener receives a notification exactly once."""
        transport = messaging.get_transport(self.conf, url='fake:')
        endpoint = mock.Mock()
        endpoint.info.return_value = None
        listener_thread = self._setup_listener(transport, [endpoint], 1)
        notifier = self._setup_notifier(transport)
        notifier.info({}, 'an_event.start', 'test message')
        self.assertFalse(self._stop_listener(listener_thread))
        endpoint.info.assert_called_once_with(
            {}, 'testpublisher', 'an_event.start', 'test message',
            {'message_id': mock.ANY, 'timestamp': mock.ANY})

    def test_two_topics(self):
        """One listener can consume notifications from two topics."""
        transport = messaging.get_transport(self.conf, url='fake:')
        endpoint = mock.Mock()
        endpoint.info.return_value = None
        targets = [messaging.Target(topic="topic1"),
                   messaging.Target(topic="topic2")]
        listener_thread = self._setup_listener(transport, [endpoint], 2,
                                               targets=targets)
        notifier = self._setup_notifier(transport, topic='topic1')
        notifier.info({'ctxt': '1'}, 'an_event.start1', 'test')
        notifier = self._setup_notifier(transport, topic='topic2')
        notifier.info({'ctxt': '2'}, 'an_event.start2', 'test')
        self.assertFalse(self._stop_listener(listener_thread))
        endpoint.info.assert_has_calls([
            mock.call({'ctxt': '1'}, 'testpublisher',
                      'an_event.start1', 'test',
                      {'timestamp': mock.ANY, 'message_id': mock.ANY}),
            mock.call({'ctxt': '2'}, 'testpublisher',
                      'an_event.start2', 'test',
                      {'timestamp': mock.ANY, 'message_id': mock.ANY})],
            any_order=True)

    def test_two_exchanges(self):
        """Listeners distinguish the same topic on different exchanges."""
        transport = messaging.get_transport(self.conf, url='fake:')
        endpoint = mock.Mock()
        endpoint.info.return_value = None
        targets = [messaging.Target(topic="topic",
                                    exchange="exchange1"),
                   messaging.Target(topic="topic",
                                    exchange="exchange2")]
        listener_thread = self._setup_listener(transport, [endpoint], 2,
                                               targets=targets)
        notifier = self._setup_notifier(transport, topic="topic")

        def mock_notifier_exchange(name):
            # Redirect subsequent notifications to the given exchange by
            # patching the transport's notification send path.
            def side_effect(target, ctxt, message, version, retry):
                target.exchange = name
                return transport._driver.send_notification(target, ctxt,
                                                           message, version,
                                                           retry=retry)
            transport._send_notification = mock.MagicMock(
                side_effect=side_effect)

        notifier.info({'ctxt': '0'},
                      'an_event.start', 'test message default exchange')
        mock_notifier_exchange('exchange1')
        notifier.info({'ctxt': '1'},
                      'an_event.start', 'test message exchange1')
        mock_notifier_exchange('exchange2')
        notifier.info({'ctxt': '2'},
                      'an_event.start', 'test message exchange2')
        self.assertFalse(self._stop_listener(listener_thread))
        endpoint.info.assert_has_calls([
            mock.call({'ctxt': '1'}, 'testpublisher', 'an_event.start',
                      'test message exchange1',
                      {'timestamp': mock.ANY, 'message_id': mock.ANY}),
            mock.call({'ctxt': '2'}, 'testpublisher', 'an_event.start',
                      'test message exchange2',
                      {'timestamp': mock.ANY, 'message_id': mock.ANY})],
            any_order=True)

    def test_two_endpoints(self):
        """Both endpoints receive a notification delivered once."""
        transport = messaging.get_transport(self.conf, url='fake:')
        endpoint1 = mock.Mock()
        endpoint1.info.return_value = None
        endpoint2 = mock.Mock()
        endpoint2.info.return_value = messaging.NotificationResult.HANDLED
        listener_thread = self._setup_listener(transport,
                                               [endpoint1, endpoint2], 1)
        notifier = self._setup_notifier(transport)
        notifier.info({}, 'an_event.start', 'test')
        self.assertFalse(self._stop_listener(listener_thread))
        endpoint1.info.assert_called_once_with(
            {}, 'testpublisher', 'an_event.start', 'test', {
                'timestamp': mock.ANY,
                'message_id': mock.ANY})
        endpoint2.info.assert_called_once_with(
            {}, 'testpublisher', 'an_event.start', 'test', {
                'timestamp': mock.ANY,
                'message_id': mock.ANY})

    def test_requeue(self):
        """A REQUEUE result causes the message to be redelivered."""
        transport = messaging.get_transport(self.conf, url='fake:')
        endpoint = mock.Mock()
        endpoint.info = mock.Mock()

        def side_effect_requeue(*args, **kwargs):
            # Requeue on the first delivery, accept on the second.
            if endpoint.info.call_count == 1:
                return messaging.NotificationResult.REQUEUE
            return messaging.NotificationResult.HANDLED

        endpoint.info.side_effect = side_effect_requeue
        listener_thread = self._setup_listener(transport,
                                               [endpoint], 2)
        notifier = self._setup_notifier(transport)
        notifier.info({}, 'an_event.start', 'test')
        self.assertFalse(self._stop_listener(listener_thread))
        endpoint.info.assert_has_calls([
            mock.call({}, 'testpublisher', 'an_event.start', 'test',
                      {'timestamp': mock.ANY, 'message_id': mock.ANY}),
            mock.call({}, 'testpublisher', 'an_event.start', 'test',
                      {'timestamp': mock.ANY, 'message_id': mock.ANY})])
| |
"""Traits-based GUI for head-MRI coregistration"""
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
import os
from ..externals.six.moves import queue
import re
from threading import Thread
import traceback
import warnings
import numpy as np
from scipy.spatial.distance import cdist
# allow import without traits
try:
from mayavi.core.ui.mayavi_scene import MayaviScene
from mayavi.tools.mlab_scene_model import MlabSceneModel
from pyface.api import (error, confirm, warning, OK, YES, NO, CANCEL,
information, FileDialog, GUI)
from traits.api import (Bool, Button, cached_property, DelegatesTo,
Directory, Enum, Float, HasTraits,
HasPrivateTraits, Instance, Int, on_trait_change,
Property, Str)
from traitsui.api import (View, Item, Group, HGroup, VGroup, VGrid,
EnumEditor, Handler, Label, TextEditor)
from traitsui.menu import Action, UndoButton, CancelButton, NoButtons
from tvtk.pyface.scene_editor import SceneEditor
except Exception:
from ..utils import trait_wraith
HasTraits = HasPrivateTraits = Handler = object
cached_property = on_trait_change = MayaviScene = MlabSceneModel =\
Bool = Button = DelegatesTo = Directory = Enum = Float = Instance =\
Int = Property = Str = View = Item = Group = HGroup = VGroup = VGrid =\
EnumEditor = Label = TextEditor = Action = UndoButton = CancelButton =\
NoButtons = SceneEditor = trait_wraith
from ..bem import make_bem_solution, write_bem_solution
from ..coreg import bem_fname, trans_fname
from ..transforms import (write_trans, read_trans, apply_trans, rotation,
translation, scaling, rotation_angles, Transform)
from ..coreg import (fit_matched_points, fit_point_cloud, scale_mri,
_find_fiducials_files, _point_cloud_error)
from ..utils import get_subjects_dir, logger
from ._fiducials_gui import MRIHeadWithFiducialsModel, FiducialsPanel
from ._file_traits import (set_mne_root, trans_wildcard, InstSource,
SubjectSelectorPanel)
from ._viewer import (defaults, HeadViewController, PointObject, SurfaceObject,
_testing_mode)
# Float editor that only commits values on Enter/defocus (not per keystroke),
# so expensive recomputations are not triggered while the user is typing.
laggy_float_editor = TextEditor(auto_set=False, enter_set=True, evaluate=float)
class CoregModel(HasPrivateTraits):
    """Traits object for estimating the head mri transform.

    Notes
    -----
    Transform from head to mri space is modelled with the following steps:

    * move the head shape to its nasion position
    * rotate the head shape with user defined rotation around its nasion
    * move the head shape by user defined translation
    * move the head shape origin to the mri nasion

    If MRI scaling is enabled,

    * the MRI is scaled relative to its origin center (prior to any
      transformation of the digitizer head)

    Don't sync transforms to anything to prevent them from being recomputed
    upon every parameter change.
    """
    # data sources
    mri = Instance(MRIHeadWithFiducialsModel, ())
    hsp = Instance(InstSource, ())

    # parameters
    grow_hair = Float(label="Grow Hair [mm]", desc="Move the back of the MRI "
                      "head outwards to compensate for hair on the digitizer "
                      "head shape")
    n_scale_params = Enum(0, 1, 3, desc="Scale the MRI to better fit the "
                          "subject's head shape (a new MRI subject will be "
                          "created with a name specified upon saving)")
    scale_x = Float(1, label="Right (X)")
    scale_y = Float(1, label="Anterior (Y)")
    scale_z = Float(1, label="Superior (Z)")
    rot_x = Float(0, label="Right (X)")
    rot_y = Float(0, label="Anterior (Y)")
    rot_z = Float(0, label="Superior (Z)")
    trans_x = Float(0, label="Right (X)")
    trans_y = Float(0, label="Anterior (Y)")
    trans_z = Float(0, label="Superior (Z)")
    prepare_bem_model = Bool(True, desc="whether to run mne_prepare_bem_model "
                             "after scaling the MRI")

    # secondary to parameters
    scale = Property(depends_on=['n_scale_params', 'scale_x', 'scale_y',
                                 'scale_z'])
    has_fid_data = Property(Bool, depends_on=['mri_origin', 'hsp.nasion'],
                            desc="Required fiducials data is present.")
    has_pts_data = Property(Bool, depends_on=['mri.points', 'hsp.points'])

    # MRI dependent
    mri_origin = Property(depends_on=['mri.nasion', 'scale'],
                          desc="Coordinates of the scaled MRI's nasion.")

    # target transforms
    mri_scale_trans = Property(depends_on=['scale'])
    # FIX: corrected typo "Transformaiton" in the user-visible description
    head_mri_trans = Property(depends_on=['hsp.nasion', 'rot_x', 'rot_y',
                                          'rot_z', 'trans_x', 'trans_y',
                                          'trans_z', 'mri_origin'],
                              desc="Transformation of the head shape to "
                              "match the scaled MRI.")

    # info
    subject_has_bem = DelegatesTo('mri')
    lock_fiducials = DelegatesTo('mri')
    can_prepare_bem_model = Property(Bool, depends_on=['n_scale_params',
                                                       'subject_has_bem'])
    can_save = Property(Bool, depends_on=['head_mri_trans'])
    raw_subject = Property(depends_on='hsp.inst_fname', desc="Subject guess "
                           "based on the raw file name.")

    # transformed geometry
    processed_mri_points = Property(depends_on=['mri.points', 'grow_hair'])
    transformed_mri_points = Property(depends_on=['processed_mri_points',
                                                  'mri_scale_trans'])
    transformed_hsp_points = Property(depends_on=['hsp.points',
                                                  'head_mri_trans'])
    transformed_mri_lpa = Property(depends_on=['mri.lpa', 'mri_scale_trans'])
    transformed_hsp_lpa = Property(depends_on=['hsp.lpa', 'head_mri_trans'])
    transformed_mri_nasion = Property(depends_on=['mri.nasion',
                                                  'mri_scale_trans'])
    transformed_hsp_nasion = Property(depends_on=['hsp.nasion',
                                                  'head_mri_trans'])
    transformed_mri_rpa = Property(depends_on=['mri.rpa', 'mri_scale_trans'])
    transformed_hsp_rpa = Property(depends_on=['hsp.rpa', 'head_mri_trans'])

    # fit properties
    lpa_distance = Property(depends_on=['transformed_mri_lpa',
                                        'transformed_hsp_lpa'])
    nasion_distance = Property(depends_on=['transformed_mri_nasion',
                                           'transformed_hsp_nasion'])
    rpa_distance = Property(depends_on=['transformed_mri_rpa',
                                        'transformed_hsp_rpa'])
    point_distance = Property(depends_on=['transformed_mri_points',
                                          'transformed_hsp_points'])

    # fit property info strings
    fid_eval_str = Property(depends_on=['lpa_distance', 'nasion_distance',
                                        'rpa_distance'])
    points_eval_str = Property(depends_on='point_distance')

    @cached_property
    def _get_can_prepare_bem_model(self):
        return self.subject_has_bem and self.n_scale_params > 0

    @cached_property
    def _get_can_save(self):
        # saving only makes sense once the transform differs from identity
        return np.any(self.head_mri_trans != np.eye(4))

    @cached_property
    def _get_has_pts_data(self):
        has = (np.any(self.mri.points) and np.any(self.hsp.points))
        return has

    @cached_property
    def _get_has_fid_data(self):
        has = (np.any(self.mri_origin) and np.any(self.hsp.nasion))
        return has

    @cached_property
    def _get_scale(self):
        """Return the scale parameters as a 0-d or length-3 array."""
        if self.n_scale_params == 0:
            return np.array(1)
        elif self.n_scale_params == 1:
            return np.array(self.scale_x)
        else:
            return np.array([self.scale_x, self.scale_y, self.scale_z])

    @cached_property
    def _get_mri_scale_trans(self):
        if np.isscalar(self.scale) or self.scale.ndim == 0:
            if self.scale == 1:
                return np.eye(4)
            else:
                s = self.scale
                return scaling(s, s, s)
        else:
            return scaling(*self.scale)

    @cached_property
    def _get_mri_origin(self):
        if np.isscalar(self.scale) and self.scale == 1:
            return self.mri.nasion
        else:
            return self.mri.nasion * self.scale

    @cached_property
    def _get_head_mri_trans(self):
        """Compose the head -> (scaled) MRI transform from the parameters."""
        if not self.has_fid_data:
            return np.eye(4)
        # move hsp so that its nasion becomes the origin
        x, y, z = -self.hsp.nasion[0]
        trans = translation(x, y, z)
        # rotate hsp by rotation parameters
        rot = rotation(self.rot_x, self.rot_y, self.rot_z)
        trans = np.dot(rot, trans)
        # move hsp by translation parameters
        transl = translation(self.trans_x, self.trans_y, self.trans_z)
        trans = np.dot(transl, trans)
        # move the hsp origin(/nasion) to the MRI's nasion
        x, y, z = self.mri_origin[0]
        tgt_mri_trans = translation(x, y, z)
        trans = np.dot(tgt_mri_trans, trans)
        return trans

    @cached_property
    def _get_processed_mri_points(self):
        """Return MRI head points, optionally moved outward to model hair."""
        if self.grow_hair:
            if len(self.mri.norms):
                # hair distance is specified in mm of the *original* MRI, so
                # compensate for any scaling
                if self.n_scale_params == 0:
                    scaled_hair_dist = self.grow_hair / 1000
                else:
                    scaled_hair_dist = self.grow_hair / self.scale / 1000
                points = self.mri.points.copy()
                # heuristic for "back of the head": z above y
                hair = points[:, 2] > points[:, 1]
                points[hair] += self.mri.norms[hair] * scaled_hair_dist
                return points
            else:
                # FIX: corrected typo "form" -> "from" in the error message
                error(None, "Norms missing from bem, can't grow hair")
                self.grow_hair = 0
        return self.mri.points

    @cached_property
    def _get_transformed_mri_points(self):
        points = apply_trans(self.mri_scale_trans, self.processed_mri_points)
        return points

    @cached_property
    def _get_transformed_mri_lpa(self):
        return apply_trans(self.mri_scale_trans, self.mri.lpa)

    @cached_property
    def _get_transformed_mri_nasion(self):
        return apply_trans(self.mri_scale_trans, self.mri.nasion)

    @cached_property
    def _get_transformed_mri_rpa(self):
        return apply_trans(self.mri_scale_trans, self.mri.rpa)

    @cached_property
    def _get_transformed_hsp_points(self):
        return apply_trans(self.head_mri_trans, self.hsp.points)

    @cached_property
    def _get_transformed_hsp_lpa(self):
        return apply_trans(self.head_mri_trans, self.hsp.lpa)

    @cached_property
    def _get_transformed_hsp_nasion(self):
        return apply_trans(self.head_mri_trans, self.hsp.nasion)

    @cached_property
    def _get_transformed_hsp_rpa(self):
        return apply_trans(self.head_mri_trans, self.hsp.rpa)

    @cached_property
    def _get_lpa_distance(self):
        d = np.ravel(self.transformed_mri_lpa - self.transformed_hsp_lpa)
        return np.sqrt(np.dot(d, d))

    @cached_property
    def _get_nasion_distance(self):
        d = np.ravel(self.transformed_mri_nasion - self.transformed_hsp_nasion)
        return np.sqrt(np.dot(d, d))

    @cached_property
    def _get_rpa_distance(self):
        d = np.ravel(self.transformed_mri_rpa - self.transformed_hsp_rpa)
        return np.sqrt(np.dot(d, d))

    @cached_property
    def _get_point_distance(self):
        """Distance from each hsp point to its closest MRI point (or None)."""
        if (len(self.transformed_hsp_points) == 0 or
                len(self.transformed_mri_points) == 0):
            return
        dists = cdist(self.transformed_hsp_points, self.transformed_mri_points,
                      'euclidean')
        dists = np.min(dists, 1)
        return dists

    @cached_property
    def _get_fid_eval_str(self):
        d = (self.lpa_distance * 1000, self.nasion_distance * 1000,
             self.rpa_distance * 1000)
        txt = ("Fiducials Error: LPA %.1f mm, NAS %.1f mm, RPA %.1f mm" % d)
        return txt

    @cached_property
    def _get_points_eval_str(self):
        if self.point_distance is None:
            return ""
        av_dist = np.mean(self.point_distance)
        return "Average Points Error: %.1f mm" % (av_dist * 1000)

    def _get_raw_subject(self):
        # subject name guessed based on the inst file name
        if '_' in self.hsp.inst_fname:
            subject, _ = self.hsp.inst_fname.split('_', 1)
            if not subject:
                subject = None
        else:
            subject = None
        return subject

    @on_trait_change('raw_subject')
    def _on_raw_subject_change(self, subject):
        # auto-select the guessed subject if available, else fsaverage
        if subject in self.mri.subject_source.subjects:
            self.mri.subject = subject
        elif 'fsaverage' in self.mri.subject_source.subjects:
            self.mri.subject = 'fsaverage'

    def omit_hsp_points(self, distance=0, reset=False):
        """Exclude head shape points that are far away from the MRI head

        Parameters
        ----------
        distance : float
            Exclude all points that are further away from the MRI head than
            this distance. Previously excluded points are still excluded unless
            reset=True is specified. A value of distance <= 0 excludes nothing.
        reset : bool
            Reset the filter before calculating new omission (default is
            False).
        """
        distance = float(distance)
        if reset:
            logger.info("Coregistration: Reset excluded head shape points")
            with warnings.catch_warnings(record=True):  # Traits None comp
                self.hsp.points_filter = None
        if distance <= 0:
            return
        # find the new filter
        hsp_pts = self.transformed_hsp_points
        mri_pts = self.transformed_mri_points
        point_distance = _point_cloud_error(hsp_pts, mri_pts)
        new_sub_filter = point_distance <= distance
        n_excluded = np.sum(~new_sub_filter)
        logger.info("Coregistration: Excluding %i head shape points with "
                    "distance >= %.3f m.", n_excluded, distance)
        # combine the new filter with the previous filter
        old_filter = self.hsp.points_filter
        if old_filter is None:
            new_filter = new_sub_filter
        else:
            # FIX: np.bool8 was removed in NumPy 2.0; plain bool is equivalent
            new_filter = np.ones(len(self.hsp.raw_points), bool)
            new_filter[old_filter] = new_sub_filter
        # set the filter
        with warnings.catch_warnings(record=True):  # comp to None in Traits
            self.hsp.points_filter = new_filter

    def fit_auricular_points(self):
        "Find rotation to fit LPA and RPA"
        src_fid = np.vstack((self.hsp.lpa, self.hsp.rpa))
        src_fid -= self.hsp.nasion
        tgt_fid = np.vstack((self.mri.lpa, self.mri.rpa))
        tgt_fid -= self.mri.nasion
        tgt_fid *= self.scale
        tgt_fid -= [self.trans_x, self.trans_y, self.trans_z]
        x0 = (self.rot_x, self.rot_y, self.rot_z)
        rot = fit_matched_points(src_fid, tgt_fid, rotate=True,
                                 translate=False, x0=x0, out='params')
        self.rot_x, self.rot_y, self.rot_z = rot

    def fit_fiducials(self):
        "Find rotation and translation to fit all 3 fiducials"
        src_fid = np.vstack((self.hsp.lpa, self.hsp.nasion, self.hsp.rpa))
        src_fid -= self.hsp.nasion
        tgt_fid = np.vstack((self.mri.lpa, self.mri.nasion, self.mri.rpa))
        tgt_fid -= self.mri.nasion
        tgt_fid *= self.scale
        x0 = (self.rot_x, self.rot_y, self.rot_z, self.trans_x, self.trans_y,
              self.trans_z)
        est = fit_matched_points(src_fid, tgt_fid, x0=x0, out='params')
        self.rot_x, self.rot_y, self.rot_z = est[:3]
        self.trans_x, self.trans_y, self.trans_z = est[3:]

    def fit_hsp_points(self):
        "Find rotation to fit head shapes"
        src_pts = self.hsp.points - self.hsp.nasion
        tgt_pts = self.processed_mri_points - self.mri.nasion
        tgt_pts *= self.scale
        tgt_pts -= [self.trans_x, self.trans_y, self.trans_z]
        x0 = (self.rot_x, self.rot_y, self.rot_z)
        rot = fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=False,
                              x0=x0)
        self.rot_x, self.rot_y, self.rot_z = rot

    def fit_scale_auricular_points(self):
        "Find rotation and MRI scaling based on LPA and RPA"
        src_fid = np.vstack((self.hsp.lpa, self.hsp.rpa))
        src_fid -= self.hsp.nasion
        tgt_fid = np.vstack((self.mri.lpa, self.mri.rpa))
        tgt_fid -= self.mri.nasion
        tgt_fid -= [self.trans_x, self.trans_y, self.trans_z]
        # the fit estimates the inverse scale, hence the 1/ conversions
        x0 = (self.rot_x, self.rot_y, self.rot_z, 1. / self.scale_x)
        x = fit_matched_points(src_fid, tgt_fid, rotate=True, translate=False,
                               scale=1, x0=x0, out='params')
        self.scale_x = 1. / x[3]
        self.rot_x, self.rot_y, self.rot_z = x[:3]

    def fit_scale_fiducials(self):
        "Find translation, rotation and scaling based on the three fiducials"
        src_fid = np.vstack((self.hsp.lpa, self.hsp.nasion, self.hsp.rpa))
        src_fid -= self.hsp.nasion
        tgt_fid = np.vstack((self.mri.lpa, self.mri.nasion, self.mri.rpa))
        tgt_fid -= self.mri.nasion
        x0 = (self.rot_x, self.rot_y, self.rot_z, self.trans_x, self.trans_y,
              self.trans_z, 1. / self.scale_x,)
        est = fit_matched_points(src_fid, tgt_fid, rotate=True, translate=True,
                                 scale=1, x0=x0, out='params')
        self.scale_x = 1. / est[6]
        self.rot_x, self.rot_y, self.rot_z = est[:3]
        self.trans_x, self.trans_y, self.trans_z = est[3:6]

    def fit_scale_hsp_points(self):
        "Find MRI scaling and rotation to match head shape points"
        src_pts = self.hsp.points - self.hsp.nasion
        tgt_pts = self.processed_mri_points - self.mri.nasion
        if self.n_scale_params == 1:
            x0 = (self.rot_x, self.rot_y, self.rot_z, 1. / self.scale_x)
            est = fit_point_cloud(src_pts, tgt_pts, rotate=True,
                                  translate=False, scale=1, x0=x0)
            self.scale_x = 1. / est[3]
        else:
            x0 = (self.rot_x, self.rot_y, self.rot_z, 1. / self.scale_x,
                  1. / self.scale_y, 1. / self.scale_z)
            est = fit_point_cloud(src_pts, tgt_pts, rotate=True,
                                  translate=False, scale=3, x0=x0)
            self.scale_x, self.scale_y, self.scale_z = 1. / est[3:]
        self.rot_x, self.rot_y, self.rot_z = est[:3]

    def get_scaling_job(self, subject_to, skip_fiducials, do_bem_sol):
        "Find all arguments needed for the scaling worker"
        subjects_dir = self.mri.subjects_dir
        subject_from = self.mri.subject
        bem_names = []
        if do_bem_sol:
            # collect the subject's BEM files that need to be re-solved
            pattern = bem_fname.format(subjects_dir=subjects_dir,
                                       subject=subject_from, name='(.+-bem)')
            bem_dir, pattern = os.path.split(pattern)
            for filename in os.listdir(bem_dir):
                match = re.match(pattern, filename)
                if match:
                    bem_names.append(match.group(1))
        return (subjects_dir, subject_from, subject_to, self.scale,
                skip_fiducials, bem_names)

    def load_trans(self, fname):
        """Load the head-mri transform from a fif file

        Parameters
        ----------
        fname : str
            File path.
        """
        info = read_trans(fname)
        head_mri_trans = info['trans']
        self.set_trans(head_mri_trans)

    def reset(self):
        """Reset all the parameters affecting the coregistration"""
        # BUG FIX: the trait is named 'n_scale_params'; the previous
        # 'n_scaling_params' silently failed to reset the scaling mode.
        self.reset_traits(('grow_hair', 'n_scale_params', 'scale_x',
                           'scale_y', 'scale_z', 'rot_x', 'rot_y', 'rot_z',
                           'trans_x', 'trans_y', 'trans_z'))

    def set_trans(self, head_mri_trans):
        """Set rotation and translation parameters from a transformation matrix

        Parameters
        ----------
        head_mri_trans : array, shape (4, 4)
            Transformation matrix from head to MRI space.
        """
        # factor out the nasion-centering translations used by
        # head_mri_trans, then recover rotation/translation parameters
        x, y, z = -self.mri_origin[0]
        mri_tgt_trans = translation(x, y, z)
        head_tgt_trans = np.dot(mri_tgt_trans, head_mri_trans)
        x, y, z = self.hsp.nasion[0]
        src_hsp_trans = translation(x, y, z)
        src_tgt_trans = np.dot(head_tgt_trans, src_hsp_trans)
        rot_x, rot_y, rot_z = rotation_angles(src_tgt_trans[:3, :3])
        x, y, z = src_tgt_trans[:3, 3]
        self.rot_x = rot_x
        self.rot_y = rot_y
        self.rot_z = rot_z
        self.trans_x = x
        self.trans_y = y
        self.trans_z = z

    def save_trans(self, fname):
        """Save the head-mri transform as a fif file

        Parameters
        ----------
        fname : str
            Target file path.
        """
        if not self.can_save:
            raise RuntimeError("Not enough information for saving transform")
        write_trans(fname, Transform('head', 'mri', self.head_mri_trans))
class CoregFrameHandler(Handler):
    """Handler that checks for unfinished processes before closing its window
    """

    def close(self, info, is_ok):
        # Allow closing only when no queued MRI-scaling jobs remain.
        if not info.object.queue.unfinished_tasks:
            return True
        information(None, "Can not close the window while saving is still "
                    "in progress. Please wait until all MRIs are "
                    "processed.", "Saving Still in Progress")
        return False
class CoregPanel(HasPrivateTraits):
    """Traits panel holding the coregistration parameters and fit buttons.

    Scaling/rotation/translation parameters delegate to the shared
    ``CoregModel``; MRI scaling jobs are executed on a background daemon
    thread fed through ``self.queue``.
    """

    model = Instance(CoregModel)

    # parameters
    reset_params = Button(label='Reset')
    grow_hair = DelegatesTo('model')
    n_scale_params = DelegatesTo('model')
    scale_step = Float(0.01)
    scale_x = DelegatesTo('model')
    scale_x_dec = Button('-')
    scale_x_inc = Button('+')
    scale_y = DelegatesTo('model')
    scale_y_dec = Button('-')
    scale_y_inc = Button('+')
    scale_z = DelegatesTo('model')
    scale_z_dec = Button('-')
    scale_z_inc = Button('+')
    rot_step = Float(0.01)
    rot_x = DelegatesTo('model')
    rot_x_dec = Button('-')
    rot_x_inc = Button('+')
    rot_y = DelegatesTo('model')
    rot_y_dec = Button('-')
    rot_y_inc = Button('+')
    rot_z = DelegatesTo('model')
    rot_z_dec = Button('-')
    rot_z_inc = Button('+')
    trans_step = Float(0.001)
    trans_x = DelegatesTo('model')
    trans_x_dec = Button('-')
    trans_x_inc = Button('+')
    trans_y = DelegatesTo('model')
    trans_y_dec = Button('-')
    trans_y_inc = Button('+')
    trans_z = DelegatesTo('model')
    trans_z_dec = Button('-')
    trans_z_inc = Button('+')

    # fitting
    has_fid_data = DelegatesTo('model')
    has_pts_data = DelegatesTo('model')
    # fitting with scaling
    fits_hsp_points = Button(label='Fit Head Shape')
    fits_fid = Button(label='Fit Fiducials')
    fits_ap = Button(label='Fit LPA/RPA')
    # fitting without scaling
    fit_hsp_points = Button(label='Fit Head Shape')
    fit_fid = Button(label='Fit Fiducials')
    fit_ap = Button(label='Fit LPA/RPA')

    # fit info
    fid_eval_str = DelegatesTo('model')
    points_eval_str = DelegatesTo('model')

    # saving
    can_prepare_bem_model = DelegatesTo('model')
    can_save = DelegatesTo('model')
    prepare_bem_model = DelegatesTo('model')
    save = Button(label="Save As...")
    load_trans = Button
    # queue of MRI-scaling jobs consumed by the worker thread (see __init__)
    queue = Instance(queue.Queue, ())
    queue_feedback = Str('')
    queue_current = Str('')
    queue_len = Int(0)
    queue_len_str = Property(Str, depends_on=['queue_len'])

    # NOTE(review): the z-axis tooltips below repeat the y-axis text
    # ("anterior-posterior axis"); z is presumably superior-inferior --
    # confirm before changing the user-visible strings.
    view = View(VGroup(Item('grow_hair', show_label=True),
                       Item('n_scale_params', label='MRI Scaling',
                            style='custom', show_label=True,
                            editor=EnumEditor(values={0: '1:No Scaling',
                                                      1: '2:1 Param',
                                                      3: '3:3 Params'},
                                              cols=3)),
                       VGrid(Item('scale_x', editor=laggy_float_editor,
                                  show_label=True, tooltip="Scale along "
                                  "right-left axis",
                                  enabled_when='n_scale_params > 0'),
                             Item('scale_x_dec',
                                  enabled_when='n_scale_params > 0'),
                             Item('scale_x_inc',
                                  enabled_when='n_scale_params > 0'),
                             Item('scale_step', tooltip="Scaling step",
                                  enabled_when='n_scale_params > 0'),
                             Item('scale_y', editor=laggy_float_editor,
                                  show_label=True,
                                  enabled_when='n_scale_params > 1',
                                  tooltip="Scale along anterior-posterior "
                                  "axis"),
                             Item('scale_y_dec',
                                  enabled_when='n_scale_params > 1'),
                             Item('scale_y_inc',
                                  enabled_when='n_scale_params > 1'),
                             Label('(Step)'),
                             Item('scale_z', editor=laggy_float_editor,
                                  show_label=True,
                                  enabled_when='n_scale_params > 1',
                                  tooltip="Scale along anterior-posterior "
                                  "axis"),
                             Item('scale_z_dec',
                                  enabled_when='n_scale_params > 1'),
                             Item('scale_z_inc',
                                  enabled_when='n_scale_params > 1'),
                             show_labels=False, columns=4),
                       HGroup(Item('fits_hsp_points',
                                   enabled_when='n_scale_params',
                                   tooltip="Rotate the digitizer head shape "
                                   "and scale the MRI so as to minimize the "
                                   "distance from each digitizer point to the "
                                   "closest MRI point"),
                              Item('fits_ap',
                                   enabled_when='n_scale_params == 1',
                                   tooltip="While leaving the nasion in "
                                   "place, rotate the digitizer head shape "
                                   "and scale the MRI so as to minimize the "
                                   "distance of the two auricular points"),
                              Item('fits_fid',
                                   enabled_when='n_scale_params == 1',
                                   tooltip="Move and rotate the digitizer "
                                   "head shape, and scale the MRI so as to "
                                   "minimize the distance of the three "
                                   "fiducials."),
                              show_labels=False),
                       '_',
                       Label("Translation:"),
                       VGrid(Item('trans_x', editor=laggy_float_editor,
                                  show_label=True, tooltip="Move along "
                                  "right-left axis"),
                             'trans_x_dec', 'trans_x_inc',
                             Item('trans_step', tooltip="Movement step"),
                             Item('trans_y', editor=laggy_float_editor,
                                  show_label=True, tooltip="Move along "
                                  "anterior-posterior axis"),
                             'trans_y_dec', 'trans_y_inc',
                             Label('(Step)'),
                             Item('trans_z', editor=laggy_float_editor,
                                  show_label=True, tooltip="Move along "
                                  "anterior-posterior axis"),
                             'trans_z_dec', 'trans_z_inc',
                             show_labels=False, columns=4),
                       Label("Rotation:"),
                       VGrid(Item('rot_x', editor=laggy_float_editor,
                                  show_label=True, tooltip="Rotate along "
                                  "right-left axis"),
                             'rot_x_dec', 'rot_x_inc',
                             Item('rot_step', tooltip="Rotation step"),
                             Item('rot_y', editor=laggy_float_editor,
                                  show_label=True, tooltip="Rotate along "
                                  "anterior-posterior axis"),
                             'rot_y_dec', 'rot_y_inc',
                             Label('(Step)'),
                             Item('rot_z', editor=laggy_float_editor,
                                  show_label=True, tooltip="Rotate along "
                                  "anterior-posterior axis"),
                             'rot_z_dec', 'rot_z_inc',
                             show_labels=False, columns=4),
                       # buttons
                       HGroup(Item('fit_hsp_points',
                                   enabled_when='has_pts_data',
                                   tooltip="Rotate the head shape (around the "
                                   "nasion) so as to minimize the distance "
                                   "from each head shape point to its closest "
                                   "MRI point"),
                              Item('fit_ap', enabled_when='has_fid_data',
                                   tooltip="Try to match the LPA and the RPA, "
                                   "leaving the Nasion in place"),
                              Item('fit_fid', enabled_when='has_fid_data',
                                   tooltip="Move and rotate the head shape so "
                                   "as to minimize the distance between the "
                                   "MRI and head shape fiducials"),
                              Item('load_trans', enabled_when='has_fid_data'),
                              show_labels=False),
                       '_',
                       Item('fid_eval_str', style='readonly'),
                       Item('points_eval_str', style='readonly'),
                       '_',
                       HGroup(Item('prepare_bem_model'),
                              Label("Run mne_prepare_bem_model"),
                              show_labels=False,
                              enabled_when='can_prepare_bem_model'),
                       HGroup(Item('save', enabled_when='can_save',
                                   tooltip="Save the trans file and (if "
                                   "scaling is enabled) the scaled MRI"),
                              Item('reset_params', tooltip="Reset all "
                                   "coregistration parameters"),
                              show_labels=False),
                       Item('queue_feedback', style='readonly'),
                       Item('queue_current', style='readonly'),
                       Item('queue_len_str', style='readonly'),
                       show_labels=False),
                kind='panel', buttons=[UndoButton])

    def __init__(self, *args, **kwargs):
        """Start the background worker that processes MRI-scaling jobs."""
        super(CoregPanel, self).__init__(*args, **kwargs)

        # Setup scaling worker: jobs are enqueued by _save_fired() and run on
        # a daemon thread so the GUI stays responsive while MRIs are scaled.
        def worker():
            while True:
                (subjects_dir, subject_from, subject_to, scale, skip_fiducials,
                 bem_names) = self.queue.get()
                self.queue_len -= 1

                # Scale MRI files
                self.queue_current = 'Scaling %s...' % subject_to
                try:
                    scale_mri(subject_from, subject_to, scale, True,
                              subjects_dir, skip_fiducials)
                # Exception (not bare except) so SystemExit/KeyboardInterrupt
                # are not swallowed; errors are logged and reported in the GUI.
                except Exception:
                    logger.error('Error scaling %s:\n' % subject_to +
                                 traceback.format_exc())
                    self.queue_feedback = ('Error scaling %s (see Terminal)' %
                                           subject_to)
                    bem_names = ()  # skip bem solutions
                else:
                    self.queue_feedback = 'Done scaling %s.' % subject_to

                # Precompute BEM solutions
                for bem_name in bem_names:
                    self.queue_current = ('Computing %s solution...' %
                                          bem_name)
                    try:
                        bem_file = bem_fname.format(subjects_dir=subjects_dir,
                                                    subject=subject_to,
                                                    name=bem_name)
                        bemsol = make_bem_solution(bem_file)
                        write_bem_solution(bem_file[:-4] + '-sol.fif', bemsol)
                    except Exception:
                        logger.error('Error computing %s solution:\n' %
                                     bem_name + traceback.format_exc())
                        self.queue_feedback = ('Error computing %s solution '
                                               '(see Terminal)' % bem_name)
                    else:
                        self.queue_feedback = ('Done computing %s solution.' %
                                               bem_name)

                # Finalize
                self.queue_current = ''
                self.queue.task_done()

        t = Thread(target=worker)
        t.daemon = True
        t.start()

    @cached_property
    def _get_queue_len_str(self):
        if self.queue_len:
            return "Queue length: %i" % self.queue_len
        else:
            return ''

    # NOTE(review): the cached properties below reference traits (hsp_pts,
    # hsp_fid, mri_pts, mri_fid, scale, tgt_origin) that are not declared on
    # this panel -- they look like leftovers from CoregModel; confirm whether
    # they are still reachable.
    @cached_property
    def _get_rotation(self):
        rot = np.array([self.rot_x, self.rot_y, self.rot_z])
        return rot

    @cached_property
    def _get_src_pts(self):
        return self.hsp_pts - self.hsp_fid[0]

    @cached_property
    def _get_src_fid(self):
        return self.hsp_fid - self.hsp_fid[0]

    @cached_property
    def _get_tgt_origin(self):
        return self.mri_fid[0] * self.scale

    @cached_property
    def _get_tgt_pts(self):
        pts = self.mri_pts * self.scale
        pts -= self.tgt_origin
        return pts

    @cached_property
    def _get_tgt_fid(self):
        fid = self.mri_fid * self.scale
        fid -= self.tgt_origin
        return fid

    @cached_property
    def _get_translation(self):
        trans = np.array([self.trans_x, self.trans_y, self.trans_z])
        return trans

    def _fit_ap_fired(self):
        GUI.set_busy()
        self.model.fit_auricular_points()
        GUI.set_busy(False)

    def _fit_fid_fired(self):
        GUI.set_busy()
        self.model.fit_fiducials()
        GUI.set_busy(False)

    def _fit_hsp_points_fired(self):
        GUI.set_busy()
        self.model.fit_hsp_points()
        GUI.set_busy(False)

    def _fits_ap_fired(self):
        GUI.set_busy()
        self.model.fit_scale_auricular_points()
        GUI.set_busy(False)

    def _fits_fid_fired(self):
        GUI.set_busy()
        self.model.fit_scale_fiducials()
        GUI.set_busy(False)

    def _fits_hsp_points_fired(self):
        GUI.set_busy()
        self.model.fit_scale_hsp_points()
        GUI.set_busy(False)

    def _n_scale_params_changed(self, new):
        if not new:
            return

        # Make sure that MNE_ROOT environment variable is set
        if not set_mne_root(True):
            err = ("MNE_ROOT environment variable could not be set. "
                   "You will be able to scale MRIs, but the "
                   "mne_prepare_bem_model tool will fail. Please install "
                   "MNE.")
            warning(None, err, "MNE_ROOT Not Set")

    def _reset_params_fired(self):
        self.model.reset()

    def _rot_x_dec_fired(self):
        self.rot_x -= self.rot_step

    def _rot_x_inc_fired(self):
        self.rot_x += self.rot_step

    def _rot_y_dec_fired(self):
        self.rot_y -= self.rot_step

    def _rot_y_inc_fired(self):
        self.rot_y += self.rot_step

    def _rot_z_dec_fired(self):
        self.rot_z -= self.rot_step

    def _rot_z_inc_fired(self):
        self.rot_z += self.rot_step

    def _load_trans_fired(self):
        # find trans file destination
        raw_dir = os.path.dirname(self.model.hsp.file)
        subject = self.model.mri.subject
        trans_file = trans_fname.format(raw_dir=raw_dir, subject=subject)
        dlg = FileDialog(action="open", wildcard=trans_wildcard,
                         default_path=trans_file)
        dlg.open()
        if dlg.return_code != OK:
            return
        trans_file = dlg.path
        self.model.load_trans(trans_file)

    def _save_fired(self):
        subjects_dir = self.model.mri.subjects_dir
        subject_from = self.model.mri.subject

        # check that fiducials are saved
        skip_fiducials = False
        if self.n_scale_params and not _find_fiducials_files(subject_from,
                                                             subjects_dir):
            msg = ("No fiducials file has been found for {src}. If fiducials "
                   "are not saved, they will not be available in the scaled "
                   "MRI. Should the current fiducials be saved now? "
                   "Select Yes to save the fiducials at "
                   "{src}/bem/{src}-fiducials.fif. "
                   "Select No to proceed scaling the MRI without fiducials.".
                   format(src=subject_from))
            title = "Save Fiducials for %s?" % subject_from
            rc = confirm(None, msg, title, cancel=True, default=CANCEL)
            if rc == CANCEL:
                return
            elif rc == YES:
                self.model.mri.save(self.model.mri.default_fid_fname)
            elif rc == NO:
                skip_fiducials = True
            else:
                raise RuntimeError("rc=%s" % repr(rc))

        # find target subject
        if self.n_scale_params:
            subject_to = self.model.raw_subject or subject_from
            mridlg = NewMriDialog(subjects_dir=subjects_dir,
                                  subject_from=subject_from,
                                  subject_to=subject_to)
            ui = mridlg.edit_traits(kind='modal')
            if not ui.result:  # i.e., user pressed cancel
                return
            subject_to = mridlg.subject_to
        else:
            subject_to = subject_from

        # find trans file destination
        raw_dir = os.path.dirname(self.model.hsp.file)
        trans_file = trans_fname.format(raw_dir=raw_dir, subject=subject_to)
        dlg = FileDialog(action="save as", wildcard=trans_wildcard,
                         default_path=trans_file)
        dlg.open()
        if dlg.return_code != OK:
            return
        trans_file = dlg.path
        if not trans_file.endswith('.fif'):
            trans_file += '.fif'
            if os.path.exists(trans_file):
                answer = confirm(None, "The file %r already exists. Should it "
                                 "be replaced?", "Overwrite File?")
                if answer != YES:
                    return

        # save the trans file
        try:
            self.model.save_trans(trans_file)
        except Exception as e:
            error(None, "Error saving -trans.fif file: %s (See terminal for "
                  "details)" % str(e), "Error Saving Trans File")
            raise

        # save the scaled MRI
        if self.n_scale_params:
            do_bem_sol = self.can_prepare_bem_model and self.prepare_bem_model
            job = self.model.get_scaling_job(subject_to, skip_fiducials,
                                             do_bem_sol)
            self.queue.put(job)
            self.queue_len += 1

    def _scale_x_dec_fired(self):
        self.scale_x -= self.scale_step

    def _scale_x_inc_fired(self):
        self.scale_x += self.scale_step

    def _scale_x_changed(self, old, new):
        # With a single uniform scaling parameter, keep y and z in sync.
        if self.n_scale_params == 1:
            self.scale_y = new
            self.scale_z = new

    # Fixed: the y/z handlers multiplied by scale_step (0.01) or its
    # reciprocal, shrinking/blowing up the scale by 100x per click; they now
    # add/subtract the step like the x-axis handlers.
    def _scale_y_dec_fired(self):
        self.scale_y -= self.scale_step

    def _scale_y_inc_fired(self):
        self.scale_y += self.scale_step

    def _scale_z_dec_fired(self):
        self.scale_z -= self.scale_step

    def _scale_z_inc_fired(self):
        self.scale_z += self.scale_step

    def _trans_x_dec_fired(self):
        self.trans_x -= self.trans_step

    def _trans_x_inc_fired(self):
        self.trans_x += self.trans_step

    def _trans_y_dec_fired(self):
        self.trans_y -= self.trans_step

    def _trans_y_inc_fired(self):
        self.trans_y += self.trans_step

    def _trans_z_dec_fired(self):
        self.trans_z -= self.trans_step

    def _trans_z_inc_fired(self):
        self.trans_z += self.trans_step
class NewMriDialog(HasPrivateTraits):
    """Dialog to determine target subject name for a scaled MRI."""

    # directory containing all subjects; the new subject folder goes here
    subjects_dir = Directory
    # name of the new (scaled) subject
    subject_to = Str
    # name of the source subject being scaled
    subject_from = Str
    subject_to_dir = Property(depends_on=['subjects_dir', 'subject_to'])
    subject_to_exists = Property(Bool, depends_on='subject_to_dir')
    # padded with spaces so the dialog reserves enough width for messages
    feedback = Str(' ' * 100)
    can_overwrite = Bool
    overwrite = Bool
    can_save = Bool
    view = View(Item('subject_to', label='New MRI Subject Name', tooltip="A "
                     "new folder with this name will be created in the "
                     "current subjects_dir for the scaled MRI files"),
                Item('feedback', show_label=False, style='readonly'),
                Item('overwrite', enabled_when='can_overwrite', tooltip="If a "
                     "subject with the chosen name exists, delete the old "
                     "subject"),
                width=500,
                buttons=[CancelButton,
                         Action(name='OK', enabled_when='can_save')])

    def _can_overwrite_changed(self, new):
        # Clearing can_overwrite also clears a stale overwrite request.
        if not new:
            self.overwrite = False

    @cached_property
    def _get_subject_to_dir(self):
        return os.path.join(self.subjects_dir, self.subject_to)

    @cached_property
    def _get_subject_to_exists(self):
        if not self.subject_to:
            return False
        elif os.path.exists(self.subject_to_dir):
            return True
        else:
            return False

    @on_trait_change('subject_to_dir,overwrite')
    def update_dialog(self):
        # Validate the chosen name and update feedback/button state.
        if not self.subject_from:
            # weird trait state that occurs even when subject_from is set
            return
        elif not self.subject_to:
            self.feedback = "No subject specified..."
            self.can_save = False
            self.can_overwrite = False
        elif self.subject_to == self.subject_from:
            self.feedback = "Must be different from MRI source subject..."
            self.can_save = False
            self.can_overwrite = False
        elif self.subject_to_exists:
            if self.overwrite:
                self.feedback = "%s will be overwritten." % self.subject_to
                self.can_save = True
                self.can_overwrite = True
            else:
                self.feedback = "Subject already exists..."
                self.can_save = False
                self.can_overwrite = True
        else:
            self.feedback = "Name ok."
            self.can_save = True
            self.can_overwrite = False
def _make_view(tabbed=False, split=False, scene_width=-1):
    """Create a view for the CoregFrame

    Parameters
    ----------
    tabbed : bool
        Combine the data source panel and the coregistration panel into a
        single panel with tabs.
    split : bool
        Split the main panels with a movable splitter (good for QT4 but
        unnecessary for wx backend).
    scene_width : int
        Specify a minimum width for the 3d scene (in pixels).
        NOTE(review): this parameter is not used in the body -- the scene
        width is hard-coded to 500 below; confirm whether it should be
        plumbed through.

    Returns
    -------
    view : traits View
        View object for the CoregFrame.
    """
    # 3D scene plus the head-view orientation controls
    view_options = VGroup(Item('headview', style='custom'), 'view_options',
                          show_border=True, show_labels=False, label='View')

    scene = VGroup(Item('scene', show_label=False,
                        editor=SceneEditor(scene_class=MayaviScene),
                        dock='vertical', width=500),
                   view_options)

    # Left-hand column: MRI subject, fiducials and head-shape data sources
    data_panel = VGroup(VGroup(Item('subject_panel', style='custom'),
                               label="MRI Subject", show_border=True,
                               show_labels=False),
                        VGroup(Item('lock_fiducials', style='custom',
                                    editor=EnumEditor(cols=2,
                                                      values={False: '2:Edit',
                                                              True: '1:Lock'}),
                                    enabled_when='fid_ok'),
                               HGroup('hsp_always_visible',
                                      Label("Always Show Head Shape Points"),
                                      show_labels=False),
                               Item('fid_panel', style='custom'),
                               label="MRI Fiducials", show_border=True,
                               show_labels=False),
                        VGroup(Item('raw_src', style="custom"),
                               HGroup(Item('distance', show_label=True),
                                      'omit_points', 'reset_omit_points',
                                      show_labels=False),
                               Item('omitted_info', style='readonly',
                                    show_label=False),
                               label='Head Shape Source (Raw/Epochs/Evoked)',
                               show_border=True, show_labels=False),
                        show_labels=False, label="Data Source")

    # Right-hand column: coregistration parameters (disabled until the MRI
    # fiducials are locked)
    coreg_panel = VGroup(Item('coreg_panel', style='custom'),
                         label="Coregistration", show_border=True,
                         show_labels=False,
                         enabled_when="fid_panel.locked")

    if split:
        main_layout = 'split'
    else:
        main_layout = 'normal'

    # Arrange the three panels either side by side or with tabbed side panels
    if tabbed:
        main = HGroup(scene,
                      Group(data_panel, coreg_panel, show_labels=False,
                            layout='tabbed'),
                      layout=main_layout)
    else:
        main = HGroup(data_panel, scene, coreg_panel, show_labels=False,
                      layout=main_layout)

    view = View(main, resizable=True, handler=CoregFrameHandler(),
                buttons=NoButtons)
    return view
class ViewOptionsPanel(HasTraits):
    """Secondary window exposing display options for the scene objects."""

    # surface object for the MRI scalp (set by CoregFrame._init_plot)
    mri_obj = Instance(SurfaceObject)
    # point cloud for the digitizer head shape
    hsp_obj = Instance(PointObject)
    view = View(VGroup(Item('mri_obj', style='custom',  # show_border=True,
                            label="MRI Head Surface"),
                       Item('hsp_obj', style='custom',  # show_border=True,
                            label="Head Shape Points")),
                title="View Options")
class CoregFrame(HasTraits):
    """GUI for head-MRI coregistration
    """

    model = Instance(CoregModel, ())

    scene = Instance(MlabSceneModel, ())
    headview = Instance(HeadViewController)

    subject_panel = Instance(SubjectSelectorPanel)
    fid_panel = Instance(FiducialsPanel)
    coreg_panel = Instance(CoregPanel)
    raw_src = DelegatesTo('model', 'hsp')

    # Omit Points
    distance = Float(5., label="Distance [mm]", desc="Maximal distance for "
                     "head shape points from MRI in mm")
    omit_points = Button(label='Omit Points', desc="Omit head shape points "
                         "for the purpose of the automatic coregistration "
                         "procedure.")
    reset_omit_points = Button(label='Reset Omission', desc="Reset the "
                               "omission of head shape points to include all.")
    omitted_info = Property(Str, depends_on=['model.hsp.n_omitted'])

    fid_ok = DelegatesTo('model', 'mri.fid_ok')
    lock_fiducials = DelegatesTo('model')
    hsp_always_visible = Bool(False, label="Always Show Head Shape")

    # visualization (all created lazily in _init_plot)
    hsp_obj = Instance(PointObject)
    mri_obj = Instance(SurfaceObject)
    lpa_obj = Instance(PointObject)
    nasion_obj = Instance(PointObject)
    rpa_obj = Instance(PointObject)
    hsp_lpa_obj = Instance(PointObject)
    hsp_nasion_obj = Instance(PointObject)
    hsp_rpa_obj = Instance(PointObject)
    hsp_visible = Property(depends_on=['hsp_always_visible', 'lock_fiducials'])

    view_options = Button(label="View Options")

    picker = Instance(object)

    view_options_panel = Instance(ViewOptionsPanel)

    # Processing
    queue = DelegatesTo('coreg_panel')

    view = _make_view()

    def _subject_panel_default(self):
        return SubjectSelectorPanel(model=self.model.mri.subject_source)

    def _fid_panel_default(self):
        panel = FiducialsPanel(model=self.model.mri, headview=self.headview)
        return panel

    def _coreg_panel_default(self):
        panel = CoregPanel(model=self.model)
        return panel

    def _headview_default(self):
        return HeadViewController(scene=self.scene, system='RAS')

    def __init__(self, raw=None, subject=None, subjects_dir=None):
        """Initialize the frame, optionally pre-selecting data sources.

        Parameters
        ----------
        raw : str | None
            Path to the head-shape source file (set on model.hsp.file).
        subject : str | None
            MRI subject to pre-select.
        subjects_dir : str | None
            Override for the FreeSurfer subjects directory.
        """
        super(CoregFrame, self).__init__()

        subjects_dir = get_subjects_dir(subjects_dir)
        if (subjects_dir is not None) and os.path.isdir(subjects_dir):
            self.model.mri.subjects_dir = subjects_dir

        if subject is not None:
            self.model.mri.subject = subject

        if raw is not None:
            self.model.hsp.file = raw

    @on_trait_change('scene.activated')
    def _init_plot(self):
        # Build all scene objects once the Mayavi scene is ready; rendering
        # is disabled during setup to avoid repeated redraws.
        self.scene.disable_render = True

        lpa_color = defaults['lpa_color']
        nasion_color = defaults['nasion_color']
        rpa_color = defaults['rpa_color']

        # MRI scalp
        color = defaults['mri_color']
        self.mri_obj = SurfaceObject(points=self.model.transformed_mri_points,
                                     color=color, tri=self.model.mri.tris,
                                     scene=self.scene, name="MRI Scalp")
        # on_trait_change was unreliable, so link it another way:
        self.model.mri.on_trait_change(self._on_mri_src_change, 'tris')
        self.model.sync_trait('transformed_mri_points', self.mri_obj, 'points',
                              mutual=False)
        self.fid_panel.hsp_obj = self.mri_obj

        # MRI Fiducials
        point_scale = defaults['mri_fid_scale']
        self.lpa_obj = PointObject(scene=self.scene, color=lpa_color,
                                   point_scale=point_scale, name='LPA')
        self.model.mri.sync_trait('lpa', self.lpa_obj, 'points', mutual=False)
        self.model.sync_trait('scale', self.lpa_obj, 'trans', mutual=False)

        self.nasion_obj = PointObject(scene=self.scene, color=nasion_color,
                                      point_scale=point_scale, name='Nasion')
        self.model.mri.sync_trait('nasion', self.nasion_obj, 'points',
                                  mutual=False)
        self.model.sync_trait('scale', self.nasion_obj, 'trans', mutual=False)

        self.rpa_obj = PointObject(scene=self.scene, color=rpa_color,
                                   point_scale=point_scale, name='RPA')
        self.model.mri.sync_trait('rpa', self.rpa_obj, 'points', mutual=False)
        self.model.sync_trait('scale', self.rpa_obj, 'trans', mutual=False)

        # Digitizer Head Shape
        color = defaults['hsp_point_color']
        point_scale = defaults['hsp_points_scale']
        p = PointObject(view='cloud', scene=self.scene, color=color,
                        point_scale=point_scale, resolution=5, name='HSP')
        self.hsp_obj = p
        self.model.hsp.sync_trait('points', p, mutual=False)
        self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
        self.sync_trait('hsp_visible', p, 'visible', mutual=False)

        # Digitizer Fiducials
        point_scale = defaults['hsp_fid_scale']
        opacity = defaults['hsp_fid_opacity']
        p = PointObject(scene=self.scene, color=lpa_color, opacity=opacity,
                        point_scale=point_scale, name='HSP-LPA')
        self.hsp_lpa_obj = p
        self.model.hsp.sync_trait('lpa', p, 'points', mutual=False)
        self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
        self.sync_trait('hsp_visible', p, 'visible', mutual=False)

        p = PointObject(scene=self.scene, color=nasion_color, opacity=opacity,
                        point_scale=point_scale, name='HSP-Nasion')
        self.hsp_nasion_obj = p
        self.model.hsp.sync_trait('nasion', p, 'points', mutual=False)
        self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
        self.sync_trait('hsp_visible', p, 'visible', mutual=False)

        p = PointObject(scene=self.scene, color=rpa_color, opacity=opacity,
                        point_scale=point_scale, name='HSP-RPA')
        self.hsp_rpa_obj = p
        self.model.hsp.sync_trait('rpa', p, 'points', mutual=False)
        self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
        self.sync_trait('hsp_visible', p, 'visible', mutual=False)

        # Mouse picking drives fiducial placement (skipped in testing mode,
        # where no interactive scene exists)
        on_pick = self.scene.mayavi_scene.on_mouse_pick
        if not _testing_mode():
            self.picker = on_pick(self.fid_panel._on_pick, type='cell')

        self.headview.left = True
        self.scene.disable_render = False

        self.view_options_panel = ViewOptionsPanel(mri_obj=self.mri_obj,
                                                   hsp_obj=self.hsp_obj)

    @cached_property
    def _get_hsp_visible(self):
        return self.hsp_always_visible or self.lock_fiducials

    @cached_property
    def _get_omitted_info(self):
        if self.model.hsp.n_omitted == 0:
            return "No points omitted"
        elif self.model.hsp.n_omitted == 1:
            return "1 point omitted"
        else:
            return "%i points omitted" % self.model.hsp.n_omitted

    def _omit_points_fired(self):
        # GUI distance is in mm; the model works in meters.
        distance = self.distance / 1000.
        self.model.omit_hsp_points(distance)

    def _reset_omit_points_fired(self):
        self.model.omit_hsp_points(0, True)

    @on_trait_change('model.mri.tris')
    def _on_mri_src_change(self):
        # Redraw (or clear) the scalp surface when the MRI source changes.
        if self.mri_obj is None:
            return
        if not (np.any(self.model.mri.points) and np.any(self.model.mri.tris)):
            self.mri_obj.clear()
            return

        self.mri_obj.points = self.model.mri.points
        self.mri_obj.tri = self.model.mri.tris
        self.mri_obj.plot()

    # automatically lock fiducials if a good fiducials file is loaded
    @on_trait_change('model.mri.fid_file')
    def _on_fid_file_loaded(self):
        if self.model.mri.fid_file:
            self.fid_panel.locked = True
        else:
            self.fid_panel.locked = False

    def _view_options_fired(self):
        self.view_options_panel.edit_traits()
| |
#!/usr/bin/env python
#
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Install *_incremental.apk targets as well as their dependent files."""
import argparse
import glob
import logging
import os
import posixpath
import shutil
import sys
import zipfile
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)))
import devil_chromium
from devil.android import apk_helper
from devil.android import device_utils
from devil.android import device_errors
from devil.android.sdk import version_codes
from devil.utils import reraiser_thread
from pylib import constants
from pylib.utils import run_tests_helper
from pylib.utils import time_profile
prev_sys_path = list(sys.path)
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir, 'gyp'))
from util import build_utils
sys.path = prev_sys_path
def _DeviceCachePath(device):
    """Returns the host-side path of the checksum cache file for |device|."""
    # One cache file per device, keyed by serial, kept in the output dir.
    cache_name = 'device_cache_%s.json' % device.adb.GetDeviceSerial()
    return os.path.join(constants.GetOutDirectory(), cache_name)
def _TransformDexPaths(paths):
"""Given paths like ["/a/b/c", "/a/c/d"], returns ["b.c", "c.d"]."""
if len(paths) == 1:
return [os.path.basename(paths[0])]
prefix_len = len(os.path.commonprefix(paths))
return [p[prefix_len:].replace(os.sep, '.') for p in paths]
def _Execute(concurrently, *funcs):
    """Calls all functions in |funcs| concurrently or in sequence.

    Returns the TimeProfile timer covering the whole run.
    """
    profiler = time_profile.TimeProfile()
    if not concurrently:
        for func in funcs:
            func()
    else:
        # Exceptions raised on worker threads are re-raised here.
        reraiser_thread.RunAsync(funcs)
    profiler.Stop(log=False)
    return profiler
def _GetDeviceIncrementalDir(package):
"""Returns the device path to put incremental files for the given package."""
return '/data/local/tmp/incremental-app-%s' % package
def _HasClasses(jar_path):
"""Returns whether the given jar contains classes.dex."""
with zipfile.ZipFile(jar_path) as jar:
return 'classes.dex' in jar.namelist()
def Uninstall(device, package, enable_device_cache=False):
    """Uninstalls and removes all incremental files for the given package."""
    timer = time_profile.TimeProfile()
    device.Uninstall(package)

    if enable_device_cache:
        # Uninstall is rare, so just wipe the cache in this case.
        cache_path = _DeviceCachePath(device)
        if os.path.exists(cache_path):
            os.unlink(cache_path)

    # Remove all side-loaded .so/.dex files and lock files.
    device.RunShellCommand(['rm', '-rf', _GetDeviceIncrementalDir(package)],
                           check_return=True)
    logging.info('Uninstall took %s seconds.', timer.GetDelta())
def Install(device, apk, split_globs=None, native_libs=None, dex_files=None,
            enable_device_cache=False, use_concurrency=True,
            show_proguard_warning=False, permissions=(),
            allow_downgrade=True):
  """Installs the given incremental apk and all required supporting files.

  Args:
    device: A DeviceUtils instance.
    apk: The path to the apk, or an ApkHelper instance.
    split_globs: Glob patterns for any required apk splits (optional).
    native_libs: List of app's native libraries (optional).
    dex_files: List of .dex.jar files that comprise the app's Dalvik code.
    enable_device_cache: Whether to enable on-device caching of checksums.
    use_concurrency: Whether to speed things up using multiple threads.
    show_proguard_warning: Whether to print a warning about Proguard not being
        enabled after installing.
    permissions: A list of the permissions to grant, or None to grant all
        non-blacklisted permissions in the manifest.
    allow_downgrade: Whether to allow installing an apk with a lower version
        number than the one already on the device.
  """
  main_timer = time_profile.TimeProfile()
  install_timer = time_profile.TimeProfile()
  push_native_timer = time_profile.TimeProfile()
  push_dex_timer = time_profile.TimeProfile()

  apk = apk_helper.ToHelper(apk)
  apk_package = apk.GetPackageName()
  device_incremental_dir = _GetDeviceIncrementalDir(apk_package)

  # Install .apk(s) if any of them have changed.
  def do_install():
    install_timer.Start()
    if split_globs:
      splits = []
      for split_glob in split_globs:
        splits.extend((f for f in glob.glob(split_glob)))
      device.InstallSplitApk(apk, splits, reinstall=True,
                             allow_cached_props=True, permissions=permissions,
                             allow_downgrade=allow_downgrade)
    else:
      device.Install(apk, reinstall=True, permissions=permissions,
                     allow_downgrade=allow_downgrade)
    install_timer.Stop(log=False)

  # Push .so and .dex files to the device (if they have changed).
  def do_push_files():
    if native_libs:
      push_native_timer.Start()
      with build_utils.TempDir() as temp_dir:
        device_lib_dir = posixpath.join(device_incremental_dir, 'lib')
        for path in native_libs:
          # Note: Can't use symlinks as they don't work when
          # "adb push parent_dir" is used (like we do here).
          shutil.copy(path, os.path.join(temp_dir, os.path.basename(path)))
        device.PushChangedFiles([(temp_dir, device_lib_dir)],
                                delete_device_stale=True)
      push_native_timer.Stop(log=False)

    if dex_files:
      push_dex_timer.Start()
      # Put all .dex files to be pushed into a temporary directory so that we
      # can use delete_device_stale=True.
      with build_utils.TempDir() as temp_dir:
        device_dex_dir = posixpath.join(device_incremental_dir, 'dex')
        # Ensure no two files have the same name.
        transformed_names = _TransformDexPaths(dex_files)
        for src_path, dest_name in zip(dex_files, transformed_names):
          # Binary targets with no extra classes create .dex.jar without a
          # classes.dex (which Android chokes on).
          if _HasClasses(src_path):
            shutil.copy(src_path, os.path.join(temp_dir, dest_name))
        device.PushChangedFiles([(temp_dir, device_dex_dir)],
                                delete_device_stale=True)
      push_dex_timer.Stop(log=False)

  def check_selinux():
    # Marshmallow has no filesystem access whatsoever. It might be possible to
    # get things working on Lollipop, but attempts so far have failed.
    # http://crbug.com/558818
    has_selinux = device.build_version_sdk >= version_codes.LOLLIPOP
    if has_selinux and apk.HasIsolatedProcesses():
      # Typo fix: "isoloated" -> "isolated" in the user-facing message.
      raise Exception('Cannot use incremental installs on Android L+ without '
                      'first disabling isolated processes.\n'
                      'To do so, use GN arg:\n'
                      '    disable_incremental_isolated_processes=true')

  cache_path = _DeviceCachePath(device)
  def restore_cache():
    if not enable_device_cache:
      logging.info('Ignoring device cache')
      return
    if os.path.exists(cache_path):
      logging.info('Using device cache: %s', cache_path)
      with open(cache_path) as f:
        device.LoadCacheData(f.read())
      # Delete the cached file so that any exceptions cause it to be cleared.
      os.unlink(cache_path)
    else:
      logging.info('No device cache present: %s', cache_path)

  def save_cache():
    with open(cache_path, 'w') as f:
      f.write(device.DumpCacheData())
      logging.info('Wrote device cache: %s', cache_path)

  # Create 2 lock files:
  # * install.lock tells the app to pause on start-up (until we release it).
  # * firstrun.lock is used by the app to pause all secondary processes until
  #   the primary process finishes loading the .dex / .so files.
  def create_lock_files():
    # Creates or zeros out lock files.
    cmd = ('D="%s";'
           'mkdir -p $D &&'
           'echo -n >$D/install.lock 2>$D/firstrun.lock')
    device.RunShellCommand(cmd % device_incremental_dir, check_return=True)

  # The firstrun.lock is released by the app itself.
  def release_installer_lock():
    device.RunShellCommand('echo > %s/install.lock' % device_incremental_dir,
                           check_return=True)

  # Concurrency here speeds things up quite a bit, but DeviceUtils hasn't
  # been designed for multi-threading. Enabling only because this is a
  # developer-only tool.
  setup_timer = _Execute(
      use_concurrency, create_lock_files, restore_cache, check_selinux)

  _Execute(use_concurrency, do_install, do_push_files)

  finalize_timer = _Execute(use_concurrency, release_installer_lock, save_cache)

  logging.info(
      'Took %s seconds (setup=%s, install=%s, libs=%s, dex=%s, finalize=%s)',
      main_timer.GetDelta(), setup_timer.GetDelta(), install_timer.GetDelta(),
      push_native_timer.GetDelta(), push_dex_timer.GetDelta(),
      finalize_timer.GetDelta())
  if show_proguard_warning:
    logging.warning('Target had proguard enabled, but incremental install uses '
                    'non-proguarded .dex files. Performance characteristics '
                    'may differ.')
def main():
  """Parses command-line flags and performs an incremental install/uninstall.

  Returns:
    1 when --dont-even-try is set (after logging its message); otherwise
    None, which sys.exit() treats as exit status 0.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('apk_path',
                      help='The path to the APK to install.')
  parser.add_argument('--split',
                      action='append',
                      dest='splits',
                      help='A glob matching the apk splits. '
                           'Can be specified multiple times.')
  parser.add_argument('--native_lib',
                      dest='native_libs',
                      help='Path to native library (repeatable)',
                      action='append',
                      default=[])
  parser.add_argument('--dex-file',
                      dest='dex_files',
                      help='Path to dex files (repeatable)',
                      action='append',
                      default=[])
  parser.add_argument('-d', '--device', dest='device',
                      help='Target device for apk to install on.')
  parser.add_argument('--uninstall',
                      action='store_true',
                      default=False,
                      help='Remove the app and all side-loaded files.')
  parser.add_argument('--output-directory',
                      help='Path to the root build directory.')
  parser.add_argument('--no-threading',
                      action='store_false',
                      default=True,
                      dest='threading',
                      help='Do not install and push concurrently')
  parser.add_argument('--no-cache',
                      action='store_false',
                      default=True,
                      dest='cache',
                      help='Do not use cached information about what files are '
                           'currently on the target device.')
  parser.add_argument('--show-proguard-warning',
                      action='store_true',
                      default=False,
                      help='Print a warning about proguard being disabled')
  parser.add_argument('--dont-even-try',
                      help='Prints this message and exits.')
  parser.add_argument('-v',
                      '--verbose',
                      dest='verbose_count',
                      default=0,
                      action='count',
                      help='Verbose level (multiple times for more)')
  parser.add_argument('--disable-downgrade',
                      action='store_false',
                      default=True,
                      dest='allow_downgrade',
                      # Fix: trailing space added so the implicitly concatenated
                      # string literals no longer render as "numberthan".
                      help='Disable install of apk with lower version number '
                           'than the version already on the device.')
  args = parser.parse_args()

  run_tests_helper.SetLogLevel(args.verbose_count)
  constants.SetBuildType('Debug')
  if args.output_directory:
    constants.SetOutputDirectory(args.output_directory)

  devil_chromium.Initialize(output_directory=constants.GetOutDirectory())

  # Escape hatch: log the provided message and bail out with a failure code.
  if args.dont_even_try:
    logging.fatal(args.dont_even_try)
    return 1

  # Retries are annoying when commands fail for legitimate reasons. Might want
  # to enable them if this is ever used on bots though.
  device = device_utils.DeviceUtils.HealthyDevices(
      device_arg=args.device,
      default_retries=0,
      enable_device_files_cache=True)[0]

  apk = apk_helper.ToHelper(args.apk_path)
  if args.uninstall:
    Uninstall(device, apk.GetPackageName(), enable_device_cache=args.cache)
  else:
    Install(device, apk, split_globs=args.splits, native_libs=args.native_libs,
            dex_files=args.dex_files, enable_device_cache=args.cache,
            use_concurrency=args.threading,
            show_proguard_warning=args.show_proguard_warning,
            allow_downgrade=args.allow_downgrade)
# Script entry point. sys.exit() maps main()'s return value to the process
# exit status (None becomes 0; 1 is returned on the --dont-even-try path).
if __name__ == '__main__':
  sys.exit(main())
| |
# Copyright (c) 2015 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from oslo_utils import uuidutils
from barbican.api.controllers import acls
from barbican.model import repositories
from barbican.tests.api import test_resources_policy as test_policy
from barbican.tests import utils
class TestACLsWithContextMixin(test_policy.BaseTestCase):
    """Mixin for performing common acls operation used with policy logic."""

    def _create_secret_with_creator_user(self, app, creator_user_id):
        # Install a custom barbican context so the new secret is recorded
        # with the given creator user.
        creator_ctx = self._build_context(self.project_id,
                                          user=creator_user_id)
        app.extra_environ = {'barbican.context': creator_ctx}
        secret_id, _ = create_secret(app)
        return secret_id

    def _create_container_with_creator_user(self, app, creator_user_id):
        # Install a custom barbican context so the new container is recorded
        # with the given creator user.
        creator_ctx = self._build_context(self.project_id,
                                          user=creator_user_id)
        app.extra_environ = {'barbican.context': creator_ctx}
        container_id, _ = create_container(app)
        return container_id

    def _set_acls_with_context(self, app, entity_type=None, op_type=None,
                               entity_id=None, roles=None, user=None,
                               enforce_policy=True, expect_errors=False):
        """Perform acl create/update/delete operation with policy logic.

        A custom barbican context (roles/user, optionally with the policy
        enforcer attached) is installed before issuing the request, so the
        operation runs under policy enforcement logic. Returns the webtest
        response, or None for an unrecognized op_type.
        """
        enforcer = self.policy_enforcer if enforce_policy else None
        app.extra_environ = {
            'barbican.context': self._build_context(
                self.project_id, roles=roles, user=user,
                is_admin=False, policy_enforcer=enforcer)
        }
        if op_type == 'create':
            return create_acls(app, entity_type, entity_id,
                               read_user_ids=['u1', 'u2'],
                               expect_errors=expect_errors)
        if op_type == 'update':
            return update_acls(app, entity_type, entity_id,
                               read_user_ids=['u1', 'u2'],
                               partial_update=True,
                               expect_errors=expect_errors)
        if op_type == 'delete':
            return app.delete('/{0}/{1}/acl'.format(entity_type, entity_id),
                              expect_errors=expect_errors)
        return None
class WhenTestingSecretACLsResource(utils.BarbicanAPIBaseTestCase,
                                    TestACLsWithContextMixin):
    """REST tests for the /secrets/{id}/acl resource (CRUD plus policy)."""

    def test_can_create_new_secret_acls(self):
        """Create secret acls and compare stored values with request data."""
        secret_uuid, _ = create_secret(self.app)
        resp = create_acls(
            self.app, 'secrets', secret_uuid,
            read_user_ids=['u1', 'u2'])
        self.assertEqual(200, resp.status_int)
        self.assertIsNotNone(resp.json)
        self.assertIn('/secrets/{0}/acl'.format(secret_uuid),
                      resp.json['acl_ref'])
        acl_map = _get_acl_map(secret_uuid, is_secret=True)
        # Check project_access is True when not provided
        self.assertTrue(acl_map['read']['project_access'])

    def test_who_can_create_new_secret_acls(self):
        """Test who can create new secret ACLs as per policy rules.

        New secret ACLs can be created by user who created the secret.
        Other user with 'creator' role in secret project cannot create ACL
        if user is not creator of the secret.
        User with 'admin' role in secret project can create ACL for that
        secret.
        """
        creator_user_id = 'creatorUserId'
        secret_uuid = self._create_secret_with_creator_user(
            self.app, creator_user_id)
        secret_uuid2 = self._create_secret_with_creator_user(
            self.app, creator_user_id)
        # Non-creator with only 'creator' role must be rejected.
        resp = self._set_acls_with_context(
            self.app, entity_type='secrets', op_type='create',
            entity_id=secret_uuid, roles=['creator'], user='NotSecretCreator',
            expect_errors=True)
        self.assertEqual(403, resp.status_int)
        resp = self._set_acls_with_context(
            self.app, entity_type='secrets', op_type='create',
            entity_id=secret_uuid, roles=['creator'],
            user=creator_user_id, expect_errors=False)
        self.assertEqual(200, resp.status_int)
        # test for user with 'admin' role in secret project
        resp = self._set_acls_with_context(
            self.app, entity_type='secrets', op_type='create',
            entity_id=secret_uuid2, roles=['admin'], user='AdminUser',
            expect_errors=False)
        self.assertEqual(200, resp.status_int)

    def test_create_new_secret_acls_with_project_access_false(self):
        """Should allow creating acls with project-access flag set False."""
        secret_uuid, _ = create_secret(self.app)
        resp = create_acls(
            self.app, 'secrets', secret_uuid,
            read_project_access=False)
        self.assertEqual(200, resp.status_int)
        self.assertIsNotNone(resp.json)
        self.assertIn('/secrets/{0}/acl'.format(secret_uuid),
                      resp.json['acl_ref'])
        acl_map = _get_acl_map(secret_uuid, is_secret=True)
        self.assertFalse(acl_map['read']['project_access'])

    def test_new_secret_acls_with_invalid_project_access_value_should_fail(
            self):
        """Should fail if project-access flag is provided as string value."""
        secret_uuid, _ = create_secret(self.app)
        resp = create_acls(
            self.app, 'secrets', secret_uuid,
            read_project_access="False",
            read_user_ids=['u1', 'u3', 'u4'],
            expect_errors=True)
        self.assertEqual(400, resp.status_int)
        resp = create_acls(
            self.app, 'secrets', secret_uuid,
            read_project_access="None",
            expect_errors=True)
        self.assertEqual(400, resp.status_int)

    def test_get_secret_acls_with_complete_acl_data(self):
        """Read existing acls for a secret with complete acl data."""
        secret_id, _ = create_secret(self.app)
        create_acls(
            self.app, 'secrets', secret_id,
            read_user_ids=['u1', 'u3'], read_project_access=False)
        resp = self.app.get(
            '/secrets/{0}/acl'.format(secret_id),
            expect_errors=False)
        self.assertEqual(200, resp.status_int)
        self.assertIsNotNone(resp.json)
        self.assertIn('read', resp.json)
        self.assertFalse(resp.json['read']['project-access'])
        self.assertIsNotNone(resp.json['read']['created'])
        self.assertIsNotNone(resp.json['read']['updated'])
        self.assertEqual({'u1', 'u3'}, set(resp.json['read']['users']))

    def test_get_secret_acls_with_project_access_data(self):
        """Read existing acls for acl when only project-access flag is set."""
        secret_id, _ = create_secret(self.app)
        create_acls(
            self.app, 'secrets', secret_id,
            read_project_access=False)
        resp = self.app.get(
            '/secrets/{0}/acl'.format(secret_id),
            expect_errors=False)
        self.assertEqual(200, resp.status_int)
        self.assertIsNotNone(resp.json)
        self.assertEqual([], resp.json['read']['users'])
        self.assertFalse(resp.json['read']['project-access'])
        self.assertIsNotNone(resp.json['read']['created'])
        self.assertIsNotNone(resp.json['read']['updated'])

    def test_get_secret_acls_invalid_secret_should_fail(self):
        """Get secret acls should fail for invalid secret id.

        This test applies to all secret ACLs methods as secret entity is
        populated in same manner for get, put, patch, delete methods.
        """
        secret_id, _ = create_secret(self.app)
        create_acls(
            self.app, 'secrets', secret_id,
            read_project_access=True,
            read_user_ids=['u1', 'u3', 'u4'])
        # A valid-looking but nonexistent uuid must 404.
        resp = self.app.get(
            '/secrets/{0}/acl'.format(uuidutils.generate_uuid(dashed=False)),
            expect_errors=True)
        self.assertEqual(404, resp.status_int)

    def test_get_secret_acls_no_acls_defined_return_default_acl(self):
        """Get secret acls should pass when no acls defined for a secret."""
        secret_id, _ = create_secret(self.app)
        resp = self.app.get(
            '/secrets/{0}/acl'.format(secret_id),
            expect_errors=True)
        self.assertEqual(200, resp.status_int)
        self.assertEqual(acls.DEFAULT_ACL, resp.json)

    def test_get_secret_acls_with_incorrect_uri_should_fail(self):
        """Get with an incorrect acl URI suffix should fail with 404."""
        secret_id, _ = create_secret(self.app)
        resp = self.app.get(
            '/secrets/{0}/incorrect_acls'.format(secret_id),
            expect_errors=True)
        self.assertEqual(404, resp.status_int)

    def test_full_update_secret_acls_modify_project_access_value(self):
        """ACLs full update with userids where project-access flag modified."""
        secret_uuid, _ = create_secret(self.app)
        create_acls(
            self.app, 'secrets', secret_uuid,
            read_user_ids=['u1', 'u2'],
            read_project_access=False)
        # update acls with no user input so it should delete existing users
        resp = update_acls(
            self.app, 'secrets', secret_uuid, partial_update=False,
            read_project_access=True)
        self.assertEqual(200, resp.status_int)
        self.assertIsNotNone(resp.json)
        self.assertIn('/secrets/{0}/acl'.format(secret_uuid),
                      resp.json['acl_ref'])
        acl_map = _get_acl_map(secret_uuid, is_secret=True)
        self.assertTrue(acl_map['read']['project_access'])
        self.assertIsNone(acl_map['read'].to_dict_fields().get('users'))

    def test_full_update_secret_acls_modify_users_only(self):
        """ACLs full update where specific operation acl is modified."""
        secret_uuid, _ = create_secret(self.app)
        create_acls(
            self.app, 'secrets', secret_uuid,
            read_user_ids=['u1', 'u2'], read_project_access=False)
        resp = update_acls(
            self.app, 'secrets', secret_uuid, partial_update=False,
            read_user_ids=['u1', 'u3', 'u5'])
        self.assertEqual(200, resp.status_int)
        self.assertIsNotNone(resp.json)
        self.assertIn('/secrets/{0}/acl'.format(secret_uuid),
                      resp.json['acl_ref'])
        acl_map = _get_acl_map(secret_uuid, is_secret=True)
        # Full update without project-access resets it to the default True.
        self.assertTrue(acl_map['read']['project_access'])
        self.assertNotIn('u2', acl_map['read'].to_dict_fields()['users'])
        self.assertEqual({'u1', 'u3', 'u5'},
                         set(acl_map['read'].to_dict_fields()['users']))

    def test_full_update_secret_acls_with_read_users_only(self):
        """Acls full update where specific operation acl is modified."""
        secret_uuid, _ = create_secret(self.app)
        create_acls(
            self.app, 'secrets', secret_uuid,
            read_user_ids=['u1', 'u2'])
        acl_map = _get_acl_map(secret_uuid, is_secret=True)
        # ACL api does not support 'list' operation so making direct db update
        # in acl operation data to make sure full update removes this existing
        # ACL.
        secret_acl = acl_map['read']
        secret_acl.operation = 'list'
        secret_acl.save()
        acl_map = _get_acl_map(secret_uuid, is_secret=True)
        # check 'list' operation is there in db
        self.assertIn('list', acl_map)
        resp = update_acls(
            self.app, 'secrets', secret_uuid, partial_update=False,
            read_user_ids=['u1', 'u3', 'u5'])
        self.assertEqual(200, resp.status_int)
        self.assertIsNotNone(resp.json)
        self.assertIn('/secrets/{0}/acl'.format(secret_uuid),
                      resp.json['acl_ref'])
        acl_map = _get_acl_map(secret_uuid, is_secret=True)
        # make sure 'list' operation is no longer after full update
        self.assertNotIn('list', acl_map)
        self.assertTrue(acl_map['read']['project_access'])
        self.assertEqual({'u1', 'u3', 'u5'},
                         set(acl_map['read'].to_dict_fields()['users']))
        self.assertNotIn('u2', acl_map['read'].to_dict_fields()['users'])

    def test_partial_update_secret_acls_with_read_users_only(self):
        """Acls update where specific operation acl is modified."""
        secret_uuid, _ = create_secret(self.app)
        create_acls(
            self.app, 'secrets', secret_uuid,
            read_user_ids=['u1', 'u2'])
        acl_map = _get_acl_map(secret_uuid, is_secret=True)
        # Direct db change: relabel the existing 'read' ACL as 'list' so we
        # can verify partial update leaves it alone.
        secret_acl = acl_map['read']
        secret_acl.operation = 'list'
        secret_acl.save()
        acl_map = _get_acl_map(secret_uuid, is_secret=True)
        # check 'list' operation is there in db
        self.assertIn('list', acl_map)
        resp = update_acls(
            self.app, 'secrets', secret_uuid, partial_update=True,
            read_user_ids=['u1', 'u3', 'u5'])
        self.assertEqual(200, resp.status_int)
        self.assertIsNotNone(resp.json)
        self.assertIn('/secrets/{0}/acl'.format(secret_uuid),
                      resp.json['acl_ref'])
        acl_map = _get_acl_map(secret_uuid, is_secret=True)
        # For partial update, existing other operation ACL is not touched.
        self.assertIn('list', acl_map)
        self.assertEqual({'u1', 'u2'},
                         set(acl_map['list'].to_dict_fields()['users']))
        self.assertTrue(acl_map['read']['project_access'])
        self.assertEqual({'u1', 'u3', 'u5'},
                         set(acl_map['read'].to_dict_fields()['users']))

    def test_partial_update_secret_acls_when_no_acls_defined_should_pass(self):
        """Acls partial update pass when no acls are defined for a secret.

        Partial update (PATCH) is applicable even when no explicit ACL has been
        set as by default every secret has implicit acl definition. If PUT
        is used, then new ACL is created instead.
        """
        secret_id, _ = create_secret(self.app)
        resp = update_acls(
            self.app, 'secrets', secret_id, partial_update=True,
            read_user_ids=['u1', 'u3', 'u5'], expect_errors=False)
        self.assertEqual(200, resp.status_int)
        acl_map = _get_acl_map(secret_id, is_secret=True)
        self.assertTrue(acl_map['read']['project_access'])

    def test_who_can_update_secret_acls(self):
        """Test PATCH update existing secret ACLs as per policy rules.

        Existing secret ACLs can be updated by user who created the secret.
        Other user with 'creator' role in secret project cannot update ACL
        if user is not creator of the secret.
        User with 'admin' role in secret project can update ACL for that
        secret.
        """
        creator_user_id = 'creatorUserId'
        secret_uuid = self._create_secret_with_creator_user(
            self.app, creator_user_id)
        # Seed an ACL without policy enforcement so the update path is tested.
        self._set_acls_with_context(
            self.app, entity_type='secrets', op_type='create',
            entity_id=secret_uuid, enforce_policy=False)
        resp = self._set_acls_with_context(
            self.app, entity_type='secrets', op_type='update',
            entity_id=secret_uuid, roles=['creator'], user='NotSecretCreator',
            expect_errors=True)
        self.assertEqual(403, resp.status_int)
        resp = self._set_acls_with_context(
            self.app, entity_type='secrets', op_type='update',
            entity_id=secret_uuid, roles=['creator'],
            user=creator_user_id)
        self.assertEqual(200, resp.status_int)
        # test for user with 'admin' role in secret project
        resp = self._set_acls_with_context(
            self.app, entity_type='secrets', op_type='update',
            entity_id=secret_uuid, roles=['admin'], user='AdminUser')
        self.assertEqual(200, resp.status_int)

    def test_partial_update_secret_acls_modify_project_access_values(self):
        """Acls partial update where project-access flag is modified."""
        secret_uuid, _ = create_secret(self.app)
        create_acls(
            self.app, 'secrets', secret_uuid,
            read_user_ids=['u1', 'u2'],
            read_project_access=False)
        resp = update_acls(
            self.app, 'secrets', secret_uuid, partial_update=True,
            read_project_access=True)
        self.assertEqual(200, resp.status_int)
        self.assertIsNotNone(resp.json)
        self.assertIn('/secrets/{0}/acl'.format(secret_uuid),
                      resp.json['acl_ref'])
        acl_map = _get_acl_map(secret_uuid, is_secret=True)
        # Partial update changes the flag but keeps the existing users.
        self.assertTrue(acl_map['read']['project_access'])
        self.assertEqual({'u1', 'u2'},
                         set(acl_map['read'].to_dict_fields()['users']))

    def test_delete_secret_acls_with_valid_secret_id(self):
        """Delete existing acls for a given secret."""
        secret_id, _ = create_secret(self.app)
        create_acls(
            self.app, 'secrets', secret_id,
            read_project_access=True)
        resp = self.app.delete(
            '/secrets/{0}/acl'.format(secret_id),
            expect_errors=False)
        content = resp.json
        self.assertIsNone(content)  # make sure there is no response
        self.assertEqual(200, resp.status_int)
        acl_map = _get_acl_map(secret_id, is_secret=True)
        self.assertFalse(acl_map)

    def test_delete_secret_acls_no_acl_defined_should_pass(self):
        """Delete acls should pass when no acls are defined for a secret."""
        secret_id, _ = create_secret(self.app)
        resp = self.app.delete(
            '/secrets/{0}/acl'.format(secret_id),
            expect_errors=False)
        self.assertEqual(200, resp.status_int)

    def test_who_can_delete_secret_acls(self):
        """Test who can delete existing secret ACLs as per policy rules.

        Existing secret ACLs can be deleted by user who created the secret.
        Other user with 'creator' role in secret project cannot delete ACL
        if user is not creator of the secret.
        User with 'admin' role in secret project can delete ACL for that
        secret.
        """
        creator_user_id = 'creatorUserId'
        secret_uuid = self._create_secret_with_creator_user(
            self.app, creator_user_id)
        self._set_acls_with_context(
            self.app, entity_type='secrets', op_type='create',
            entity_id=secret_uuid, enforce_policy=False)
        resp = self._set_acls_with_context(
            self.app, entity_type='secrets', op_type='delete',
            entity_id=secret_uuid, roles=['creator'], user='NotSecretCreator',
            expect_errors=True)
        self.assertEqual(403, resp.status_int)
        resp = self._set_acls_with_context(
            self.app, entity_type='secrets', op_type='delete',
            entity_id=secret_uuid, roles=['creator'],
            user=creator_user_id)
        self.assertEqual(200, resp.status_int)
        # Create new secret ACLs again.
        self._set_acls_with_context(
            self.app, entity_type='secrets', op_type='create',
            entity_id=secret_uuid, enforce_policy=False)
        # test for user with 'admin' role in secret project
        resp = self._set_acls_with_context(
            self.app, entity_type='secrets', op_type='delete',
            entity_id=secret_uuid, roles=['admin'],
            user='AdminUser')
        self.assertEqual(200, resp.status_int)

    def test_invoke_secret_acls_head_should_fail(self):
        """Should fail as head request to secret acls URI is not supported."""
        secret_id, _ = create_secret(self.app)
        resp = self.app.head(
            '/secrets/{0}/acl'.format(secret_id),
            expect_errors=True)
        self.assertEqual(405, resp.status_int)

    def test_list_secrets_with_no_acls_and_acl_only_should_be_empty(self):
        """Return list should be empty"""
        creator_user_id = 'creatorUserID'
        self._create_secret_with_creator_user(
            self.app, creator_user_id)
        # acl_only filter value is case-insensitive.
        resp = self.app.get(
            '/secrets/?acl_only=TRUE')
        self.assertEqual(200, resp.status_int)
        self.assertEqual([], resp.json['secrets'])

    def test_list_secrets_with_acls(self):
        """Return List should not include secrets with no ACL for user"""
        creator_user_id = 'creatorUserID'
        secret_uuid_acl_1 = self._create_secret_with_creator_user(
            self.app, creator_user_id)
        secret_uuid_acl_2 = self._create_secret_with_creator_user(
            self.app, creator_user_id)
        secret_uuid_no_acl = self._create_secret_with_creator_user(
            self.app, creator_user_id)
        create_acls(
            self.app, 'secrets', secret_uuid_acl_1,
            read_user_ids=[creator_user_id],
            read_project_access=False)
        create_acls(
            self.app, 'secrets', secret_uuid_acl_2,
            read_user_ids=[creator_user_id],
            read_project_access=False)
        resp = self.app.get(
            '/secrets/?acl_only=TrUe')
        self.assertEqual(200, resp.status_int)
        secret_list = resp.json.get('secrets')
        self.assertEqual(2, len(secret_list))
        self.assertNotIn(secret_uuid_no_acl, secret_list)
class WhenTestingContainerAclsResource(utils.BarbicanAPIBaseTestCase,
TestACLsWithContextMixin):
def test_can_create_new_container_acls(self):
"""Create container acls and compare db values with request data."""
container_id, _ = create_container(self.app)
resp = create_acls(
self.app, 'containers', container_id,
read_user_ids=['u1', 'u2'])
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertIn('/containers/{0}/acl'.format(container_id),
resp.json['acl_ref'])
acl_map = _get_acl_map(container_id, is_secret=False)
# Check project_access is True when not provided
self.assertTrue(acl_map['read']['project_access'])
self.assertEqual({'u1', 'u2'},
set(acl_map['read'].to_dict_fields()['users']))
def test_who_can_create_new_container_acls(self):
"""Test who can create new container ACLs as per policy rules.
New container ACLs can be created by user who created the container.
Other user with 'creator' role in container project cannot create ACL
if user is not creator of the container.
User with 'admin' role in container project can create ACL for that
container.
"""
creator_user_id = 'creatorUserId'
container_id = self._create_container_with_creator_user(
self.app, creator_user_id)
container_id2 = self._create_container_with_creator_user(
self.app, creator_user_id)
resp = self._set_acls_with_context(
self.app, entity_type='containers', op_type='create',
entity_id=container_id, roles=['creator'],
user='NotContainerCreator', expect_errors=True)
self.assertEqual(403, resp.status_int)
resp = self._set_acls_with_context(
self.app, entity_type='containers', op_type='create',
entity_id=container_id, roles=['creator'],
user=creator_user_id, expect_errors=False)
self.assertEqual(200, resp.status_int)
# test for user with 'admin' role in container project
resp = self._set_acls_with_context(
self.app, entity_type='containers', op_type='create',
entity_id=container_id2, roles=['admin'], user='AdminUser',
expect_errors=False)
self.assertEqual(200, resp.status_int)
def test_create_new_container_acls_with_project_access_true(self):
"""Should allow creating acls for new container with project-access."""
container_id, _ = create_container(self.app)
resp = create_acls(
self.app, 'containers', container_id,
read_project_access=True,
read_user_ids=['u1', 'u3', 'u4'])
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertIn('/containers/{0}/acl'.format(container_id),
resp.json['acl_ref'])
acl_map = _get_acl_map(container_id, is_secret=False)
self.assertTrue(acl_map['read']['project_access'])
def test_create_new_container_acls_with_project_access_false(self):
"""Should allow creating acls for new container with project-access."""
container_id, _ = create_container(self.app)
resp = create_acls(
self.app, 'containers', container_id,
read_project_access=False,
read_user_ids=['u1', 'u3', 'u4'])
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertIn('/containers/{0}/acl'.format(container_id),
resp.json['acl_ref'])
acl_map = _get_acl_map(container_id, is_secret=False)
self.assertFalse(acl_map['read']['project_access'])
def test_container_acls_with_invalid_project_access_value_fail(self):
"""Should fail if project-access flag is provided as string value."""
container_id, _ = create_container(self.app)
resp = create_acls(
self.app, 'containers', container_id,
read_project_access="False",
read_user_ids=['u1', 'u3', 'u4'],
expect_errors=True)
self.assertEqual(400, resp.status_int)
resp = create_acls(
self.app, 'containers', container_id,
read_project_access="None",
expect_errors=True)
self.assertEqual(400, resp.status_int)
def test_get_container_acls_with_complete_acl_data(self):
"""Read existing acls for a with complete acl data."""
container_id, _ = create_container(self.app)
create_acls(
self.app, 'containers', container_id,
read_user_ids=['u1', 'u3'], read_project_access=False)
resp = self.app.get(
'/containers/{0}/acl'.format(container_id),
expect_errors=False)
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertIn('read', resp.json)
self.assertFalse(resp.json['read']['project-access'])
self.assertIsNotNone(resp.json['read']['created'])
self.assertIsNotNone(resp.json['read']['updated'])
self.assertEqual({'u1', 'u3'}, set(resp.json['read']['users']))
def test_get_container_acls_with_project_access_data(self):
"""Read existing acls for acl when only project-access flag is set."""
container_id, _ = create_container(self.app)
create_acls(
self.app, 'containers', container_id,
read_project_access=False)
resp = self.app.get(
'/containers/{0}/acl'.format(container_id),
expect_errors=False)
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertEqual([], resp.json['read']['users'])
self.assertFalse(resp.json['read']['project-access'])
self.assertIsNotNone(resp.json['read']['created'])
self.assertIsNotNone(resp.json['read']['updated'])
def test_get_container_acls_invalid_container_id_should_fail(self):
"""Get container acls should fail for invalid secret id.
This test applies to all container ACLs methods as secret entity is
populated in same manner for get, put, patch, delete methods.
"""
container_id, _ = create_container(self.app)
create_acls(
self.app, 'containers', container_id,
read_project_access=True)
resp = self.app.get(
'/containers/{0}/acl'.format(uuidutils.generate_uuid()),
expect_errors=True)
self.assertEqual(404, resp.status_int)
def test_get_container_acls_invalid_non_uuid_secret_should_fail(self):
"""Get container acls should fail for invalid (non-uuid) id."""
container_id, _ = create_container(self.app)
create_acls(
self.app, 'containers', container_id,
read_project_access=True)
resp = self.app.get(
'/containers/{0}/acl'.format('my_container_id'),
expect_errors=True)
self.assertEqual(404, resp.status_int)
def test_get_container_acls_no_acls_defined_return_default_acl(self):
"""Get container acls should pass when no acls defined for a secret."""
container_id, _ = create_container(self.app)
resp = self.app.get(
'/containers/{0}/acl'.format(container_id),
expect_errors=True)
self.assertEqual(200, resp.status_int)
self.assertEqual(acls.DEFAULT_ACL, resp.json)
def test_full_update_container_acls_modify_all_acls(self):
"""Acls update where only user ids list is modified."""
container_id, _ = create_container(self.app)
create_acls(
self.app, 'containers', container_id, read_project_access=False,
read_user_ids=['u1', 'u2'])
resp = update_acls(
self.app, 'containers', container_id, partial_update=False,
read_user_ids=['u1', 'u2', 'u5'])
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertIn('/containers/{0}/acl'.format(container_id),
resp.json['acl_ref'])
acl_map = _get_acl_map(container_id, is_secret=False)
# Check project_access is True when not provided
self.assertTrue(acl_map['read']['project_access'])
self.assertIn('u5', acl_map['read'].to_dict_fields()['users'])
def test_full_update_container_acls_modify_project_access_values(self):
"""Acls update where user ids and project-access flag is modified."""
container_id, _ = create_container(self.app)
create_acls(
self.app, 'containers', container_id,
read_user_ids=['u1', 'u2'])
resp = update_acls(
self.app, 'containers', container_id, partial_update=False,
read_project_access=False)
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertIn('/containers/{0}/acl'.format(container_id),
resp.json['acl_ref'])
acl_map = _get_acl_map(container_id, is_secret=False)
self.assertFalse(acl_map['read']['project_access'])
self.assertIsNone(acl_map['read'].to_dict_fields().get('users'))
def test_full_update_container_acls_with_read_users_only(self):
"""Acls full update where specific operation acl is modified."""
container_id, _ = create_container(self.app)
create_acls(
self.app, 'containers', container_id,
read_user_ids=['u1', 'u2'])
acl_map = _get_acl_map(container_id, is_secret=False)
# ACL api does not support 'list' operation so making direct db update
# in acl operation data to make sure full update removes this existing
# ACL.
container_acl = acl_map['read']
container_acl.operation = 'list'
container_acl.save()
acl_map = _get_acl_map(container_id, is_secret=False)
# check 'list' operation is there in db
self.assertIn('list', acl_map)
resp = update_acls(
self.app, 'containers', container_id, partial_update=False,
read_user_ids=['u1', 'u3', 'u5'])
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertIn('/containers/{0}/acl'.format(container_id),
resp.json['acl_ref'])
acl_map = _get_acl_map(container_id, is_secret=False)
# make sure 'list' operation is no longer after full update
self.assertNotIn('list', acl_map)
self.assertTrue(acl_map['read']['project_access'])
self.assertEqual({'u1', 'u3', 'u5'},
set(acl_map['read'].to_dict_fields()['users']))
self.assertNotIn('u2', acl_map['read'].to_dict_fields()['users'])
def test_partial_update_container_acls_with_read_users_only(self):
"""Acls update where specific operation acl is modified."""
container_id, _ = create_container(self.app)
create_acls(
self.app, 'containers', container_id,
read_user_ids=['u1', 'u2'])
acl_map = _get_acl_map(container_id, is_secret=False)
secret_acl = acl_map['read']
secret_acl.operation = 'list'
secret_acl.save()
acl_map = _get_acl_map(container_id, is_secret=False)
# check 'list' operation is there in db
self.assertIn('list', acl_map)
resp = update_acls(
self.app, 'containers', container_id, partial_update=True,
read_user_ids=['u1', 'u3', 'u5'])
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertIn('/containers/{0}/acl'.format(container_id),
resp.json['acl_ref'])
acl_map = _get_acl_map(container_id, is_secret=False)
# For partial update, existing other operation ACL is not tocuhed.
self.assertIn('list', acl_map)
self.assertEqual({'u1', 'u2'},
set(acl_map['list'].to_dict_fields()['users']))
self.assertTrue(acl_map['read']['project_access'])
self.assertEqual({'u1', 'u3', 'u5'},
set(acl_map['read'].to_dict_fields()['users']))
def test_partial_update_container_acls_when_no_acls_defined(self):
"""Acls partial update pass when no acls are defined for container.
Partial update (PATCH) is applicable even when no explicit ACL has been
set as by default every container has implicit acl definition. If PUT
is used, then new ACL is created instead.
"""
container_id, _ = create_container(self.app)
resp = update_acls(
self.app, 'containers', container_id, partial_update=True,
read_user_ids=['u1', 'u3', 'u5'], expect_errors=False)
self.assertEqual(200, resp.status_int)
acl_map = _get_acl_map(container_id, is_secret=False)
self.assertTrue(acl_map['read']['project_access'])
def test_partial_update_container_acls_modify_project_access_values(self):
"""Acls partial update where project-access flag is modified."""
container_id, _ = create_container(self.app)
create_acls(
self.app, 'containers', container_id,
read_user_ids=['u1', 'u2'],
read_project_access=False)
resp = update_acls(
self.app, 'containers', container_id, partial_update=True,
read_project_access=True)
self.assertEqual(200, resp.status_int)
self.assertIsNotNone(resp.json)
self.assertIn('/containers/{0}/acl'.format(container_id),
resp.json['acl_ref'])
acl_map = _get_acl_map(container_id, is_secret=False)
self.assertTrue(acl_map['read']['project_access'])
self.assertEqual({'u1', 'u2'},
set(acl_map['read'].to_dict_fields()['users']))
def test_who_can_update_container_acls(self):
    """Test PATCH update existing container ACLs as per policy rules.

    Existing container ACLs can be updated by user who created the
    container.
    Other user with 'creator' role in container project cannot update ACL
    if user is not creator of the container.
    User with 'admin' role in container project can update ACL for that
    container.
    """
    creator_user_id = 'creatorUserId'
    container_id = self._create_container_with_creator_user(
        self.app, creator_user_id)
    # Seed an initial ACL without policy enforcement, so the calls below
    # exercise only the 'update' policy rules.
    self._set_acls_with_context(
        self.app, entity_type='containers', op_type='create',
        entity_id=container_id, enforce_policy=False)
    # A different user holding only the 'creator' role must be rejected.
    resp = self._set_acls_with_context(
        self.app, entity_type='containers', op_type='update',
        entity_id=container_id, roles=['creator'], user='NotCreator',
        expect_errors=True)
    self.assertEqual(403, resp.status_int)
    # The container's actual creator may update its ACL.
    resp = self._set_acls_with_context(
        self.app, entity_type='containers', op_type='update',
        entity_id=container_id, roles=['creator'],
        user=creator_user_id)
    self.assertEqual(200, resp.status_int)
    # test for user with 'admin' role in container project
    resp = self._set_acls_with_context(
        self.app, entity_type='containers', op_type='update',
        entity_id=container_id, roles=['admin'], user='AdminUser')
    self.assertEqual(200, resp.status_int)
def test_delete_container_acls_with_valid_container_id(self):
    """Delete existing acls for a given container."""
    container_id, _ = create_container(self.app)
    create_acls(
        self.app, 'containers', container_id,
        read_project_access=True)
    acl_url = '/containers/{0}/acl'.format(container_id)
    response = self.app.delete(acl_url, expect_errors=False)
    # A successful delete returns an empty body.
    self.assertIsNone(response.json)
    self.assertEqual(200, response.status_int)
    # No ACL entries should remain for the container.
    self.assertFalse(_get_acl_map(container_id, is_secret=False))
def test_delete_container_acls_no_acl_defined_should_pass(self):
    """Delete acls should pass when no acls are defined for a container."""
    container_id, _ = create_container(self.app)
    acl_url = '/containers/{0}/acl'.format(container_id)
    # DELETE on a container with only the implicit ACL still succeeds.
    response = self.app.delete(acl_url, expect_errors=False)
    self.assertEqual(200, response.status_int)
def test_who_can_delete_container_acls(self):
    """Test who can delete existing container ACLs as per policy rules.

    Existing container ACLs can be deleted by user who created the
    container.
    Other user with 'creator' role in container project cannot delete ACL
    if user is not creator of the container.
    User with 'admin' role in container project can delete ACL for that
    container.
    """
    creator_user_id = 'creatorUserId'
    container_id = self._create_container_with_creator_user(
        self.app, creator_user_id)
    # Seed an ACL without policy enforcement so the calls below exercise
    # only the 'delete' policy rules.
    self._set_acls_with_context(
        self.app, entity_type='containers', op_type='create',
        entity_id=container_id, enforce_policy=False)
    # A different user holding only the 'creator' role must be rejected.
    resp = self._set_acls_with_context(
        self.app, entity_type='containers', op_type='delete',
        entity_id=container_id, roles=['creator'], user='NotCreator',
        expect_errors=True)
    self.assertEqual(403, resp.status_int)
    # The container's actual creator may delete its ACL.
    resp = self._set_acls_with_context(
        self.app, entity_type='containers', op_type='delete',
        entity_id=container_id, roles=['creator'],
        user=creator_user_id)
    self.assertEqual(200, resp.status_int)
    # Create new container ACLs again.
    self._set_acls_with_context(
        self.app, entity_type='containers', op_type='create',
        entity_id=container_id, enforce_policy=False)
    # test for user with 'admin' role in container project
    resp = self._set_acls_with_context(
        self.app, entity_type='containers', op_type='delete',
        entity_id=container_id, roles=['admin'],
        user='AdminUser')
    self.assertEqual(200, resp.status_int)
def test_invoke_container_acls_head_should_fail(self):
    """HEAD request to container acls URI is not supported (405)."""
    container_id, _ = create_container(self.app)
    # The ACL sub-resource only supports GET/PUT/PATCH/DELETE.
    resp = self.app.head(
        '/containers/{0}/acl/'.format(container_id),
        expect_errors=True)
    self.assertEqual(405, resp.status_int)
# ----------------------- Helper Functions ---------------------------
def create_secret(app, name=None, algorithm=None, bit_length=None, mode=None,
                  expiration=None, payload='not-encrypted',
                  content_type='text/plain',
                  content_encoding=None, transport_key_id=None,
                  transport_key_needed=None, expect_errors=False):
    """POST a new secret and return ``(created_uuid, response)``.

    ``created_uuid`` is None unless the API answered 201 Created.
    """
    raw_request = {
        'name': name,
        'algorithm': algorithm,
        'bit_length': bit_length,
        'mode': mode,
        'expiration': expiration,
        'payload': payload,
        'payload_content_type': content_type,
        'payload_content_encoding': content_encoding,
        'transport_key_id': transport_key_id,
        'transport_key_needed': transport_key_needed
    }
    # Drop unset fields so the API only sees explicitly provided values.
    body = {field: value for field, value in raw_request.items()
            if value is not None}
    resp = app.post_json('/secrets/', body, expect_errors=expect_errors)
    created_uuid = None
    if resp.status_int == 201:
        # The uuid is the last path segment of the returned secret_ref.
        _, created_uuid = os.path.split(resp.json.get('secret_ref', ''))
    return created_uuid, resp
def create_container(app):
    """Create a generic container wrapping one freshly created secret.

    Returns ``(created_uuid, response)``; the uuid is None unless the
    POST answered 201 Created.
    """
    _, secret_resp = create_secret(app)
    container_request = {
        "name": "container name",
        "type": "generic",
        "secret_refs": [
            {
                "name": "any_key",
                "secret_ref": secret_resp.json['secret_ref']
            }
        ]
    }
    resp = app.post_json('/containers/', container_request,
                         expect_errors=False)
    created_uuid = None
    if resp.status_int == 201:
        # The uuid is the last path segment of the container_ref.
        _, created_uuid = os.path.split(resp.json.get('container_ref', ''))
    return created_uuid, resp
def create_acls(app, entity_type, entity_id, read_user_ids=None,
                read_project_access=None,
                expect_errors=False):
    """Create new ACLs (PUT); thin wrapper around manage_acls()."""
    return manage_acls(
        app, entity_type, entity_id,
        read_user_ids=read_user_ids,
        read_project_access=read_project_access,
        is_update=False, partial_update=False,
        expect_errors=expect_errors)
def update_acls(app, entity_type, entity_id, read_user_ids=None,
                read_project_access=None, partial_update=False,
                expect_errors=False):
    """Update existing ACLs (PUT or PATCH); wrapper around manage_acls()."""
    return manage_acls(
        app, entity_type, entity_id,
        read_user_ids=read_user_ids,
        read_project_access=read_project_access,
        is_update=True, partial_update=partial_update,
        expect_errors=expect_errors)
def manage_acls(app, entity_type, entity_id, read_user_ids=None,
                read_project_access=None, is_update=False,
                partial_update=None, expect_errors=False):
    """Send an ACL create/update request for a secret or container.

    PATCH is used for a partial update of an existing ACL; PUT is used
    for a create or a complete replacement.
    """
    request = {}
    _append_acl_to_request(request, 'read', read_user_ids,
                           read_project_access)
    body = {key: val for key, val in request.items() if val is not None}
    acl_url = '/{0}/{1}/acl'.format(entity_type, entity_id)
    if is_update and partial_update:
        # patch for partial update
        return app.patch_json(acl_url, body, expect_errors=expect_errors)
    # put (for create or complete update)
    return app.put_json(acl_url, body, expect_errors=expect_errors)
def _append_acl_to_request(req, operation, user_ids=None, project_access=None):
op_dict = {}
if user_ids is not None:
op_dict['users'] = user_ids
if project_access is not None:
op_dict['project-access'] = project_access
if op_dict:
req[operation] = op_dict
def _get_acl_map(entity_id, is_secret=True):
    """Provides map of operation: acl_entity for given entity id."""
    if is_secret:
        repo = repositories.get_secret_acl_repository()
        acls = repo.get_by_secret_id(entity_id)
    else:
        repo = repositories.get_container_acl_repository()
        acls = repo.get_by_container_id(entity_id)
    return dict((acl.operation, acl) for acl in acls)
| |
from twisted.internet.defer import inlineCallbacks
from battlesnake.outbound_commands import think_fn_wrappers
from battlesnake.outbound_commands import mux_commands
from battlesnake.outbound_commands import unit_manipulation
from battlesnake.outbound_commands.think_fn_wrappers import get_map_dimensions
from battlesnake.outbound_commands.unit_manipulation import \
restore_mechprefs_on_unit
from battlesnake.plugins.contrib.arena_master.db_api import \
update_match_game_state_in_db, \
update_match_difficulty_in_db
from battlesnake.plugins.contrib.arena_master.puppets.kill_tracking import \
record_kill
from battlesnake.plugins.contrib.arena_master.puppets.units.unit_store import \
ArenaMapUnitStore
class ArenaMasterPuppet(object):
    """
    This is a base class for Arena Master puppets. Each game mode sub-classes
    this. We use it to track game state, and have methods for various events.
    """

    def __init__(self, protocol, dbref):
        """
        :param protocol: The bot's protocol connection, used to send
            commands/functions to the game.
        :param str dbref: Dbref of the in-game arena master puppet
            (e.g. '#1234').
        """
        self.protocol = protocol
        self.dbref = dbref
        # '#1234' -> 'Arena 1234'.
        self.arena_name = 'Arena %s' % self.dbref[1:]
        # The attributes below start as None and are populated later by
        # load_arena_from_ingame_obj(), not at construction time.
        self.map_dbref = None
        self.staging_dbref = None
        self.puppet_ol_dbref = None
        self.leader_dbref = None
        self.creator_dbref = None
        # A cache for all units in the arena, plus their states.
        self.unit_store = None
        self.map_width = None
        self.map_height = None
        # Currently only 'wave'.
        self.game_mode = None
        # One of: 'staging', 'in-between', 'active', 'finished'
        self.game_state = None
        # One of: 'easy', 'normal', 'hard', 'overkill'
        self.difficulty_level = None
        # Match ID in the DB.
        self.match_id = None

    def __str__(self):
        return u"<ArenaMasterPuppet: %s for map %s>" % (self.dbref, self.map_dbref)

    def get_ingame_attr_map(self):
        """
        This method maps attributes on the in-game arena master puppet to
        attributes on this instance. The keys are the in-game attribute names,
        the values are the instance attribute names.

        :rtype: dict
        """
        return {
            'MAP.DBREF': 'map_dbref',
            'LEADER.DBREF': 'leader_dbref',
            'CREATOR.DBREF': 'creator_dbref',
            'STAGING_ROOM.DBREF': 'staging_dbref',
            'PUPPET_OL.DBREF': 'puppet_ol_dbref',
            'GAME_MODE.D': 'game_mode',
            'GAME_STATE.D': 'game_state',
            'DIFFICULTY_LEVEL.D': 'difficulty_level',
            'MATCH_ID.D': 'match_id',
        }

    @inlineCallbacks
    def load_arena_from_ingame_obj(self):
        """
        Pulls all of the attributes mentioned in the attribute map and sets them
        to the appropriate variables on the instance.
        """
        p = self.protocol
        ingame_attr_map = self.get_ingame_attr_map()
        ingame_attrs = yield think_fn_wrappers.get_attrs(
            p, self.dbref, ingame_attr_map.keys())
        # Convert attribute keys to ArenaMasterPuppet kwargs.
        arena_kwargs = {ingame_attr_map[k]: v for k, v in ingame_attrs.items()}
        for attr, val in arena_kwargs.items():
            setattr(self, attr, val)
        # Normalize to lower case, matching the documented difficulty keys.
        self.difficulty_level = self.difficulty_level.lower()
        self.map_width, self.map_height = yield get_map_dimensions(
            p, arena_kwargs['map_dbref'])
        # The store polls units on the map and reports diffs back through
        # handle_unit_change().
        self.unit_store = ArenaMapUnitStore(
            arena_master_puppet=self, unit_change_callback=self.handle_unit_change)

    @property
    def id(self):
        """
        :returns: The arena ID, which is just the dbref without the # sign.
        """
        return self.dbref[1:]

    @inlineCallbacks
    def change_game_state(self, new_state):
        """
        Changes the match's state.

        :param str new_state: See GAME_STATE_* defines.
        """
        new_state = new_state.lower()
        self.game_state = new_state
        # Mirror the new state onto the in-game object, then into the DB.
        attrs = {'GAME_STATE.D': new_state}
        yield think_fn_wrappers.set_attrs(self.protocol, self.dbref, attrs)
        yield update_match_game_state_in_db(self)

    @inlineCallbacks
    def set_difficulty(self, new_difficulty):
        """
        Sets the difficulty level for an arena.

        :param new_difficulty: See ARENA_DIFFICULTY_LEVEL's keys.
            NOTE(review): this was documented as float, but __init__
            describes difficulty_level as one of 'easy'/'normal'/'hard'/
            'overkill' -- confirm the expected type with callers.
        """
        self.difficulty_level = new_difficulty
        attrs = {'DIFFICULTY_LEVEL.D': self.difficulty_level}
        yield think_fn_wrappers.set_attrs(self.protocol, self.dbref, attrs)
        yield update_match_difficulty_in_db(self)
        # Announce the change to everyone in the arena.
        message = (
            "%ch[name({leader_dbref})] has set the difficulty "
            "level to: %cy{difficulty}%cn".format(
                leader_dbref=self.leader_dbref, difficulty=new_difficulty))
        self.pemit_throughout_zone(message)

    @inlineCallbacks
    def set_arena_leader(self, new_leader):
        """
        Changes an arena's leader.

        :param str new_leader: A valid player dbref.
        """
        self.leader_dbref = new_leader
        attrs = {'LEADER.DBREF': new_leader}
        yield think_fn_wrappers.set_attrs(self.protocol, self.dbref, attrs)

    def pemit_throughout_zone(self, message):
        """
        Sends a message to the entire arena.

        :param str message: The message to send.
        """
        # We do the setdiff() here to remove dupes.
        announce_cmd = "@dol [setdiff(zwho({dbref}),)]=@pemit ##={message}".format(
            dbref=self.dbref, message=message)
        self.protocol.write(announce_cmd)

    def do_strategic_tic(self):
        """
        For now, we use smallish maps and get the AI to stumble into the
        defenders. We could get smarter and more precise down the road,
        but this will do for now.

        Sub-classes must override this.
        """
        raise NotImplementedError("Implement do_strategic_tic()")

    def save_player_tics(self):
        """
        Saves all human player tics.
        """
        # Persist both tics and mechprefs so they survive unit destruction.
        for unit in self.unit_store.list_human_units():
            unit_manipulation.save_unit_tics_to_pilot(self.protocol, unit)
            unit_manipulation.save_unit_mechprefs_to_pilot(self.protocol, unit)

    @inlineCallbacks
    def change_map(self, mmap_or_mapname):
        """
        Changes the currently loaded map.

        :param mmap_or_mapname: The generated map to load.
        :type mmap_or_mapname: MuxMap or str
        """
        p = self.protocol
        if isinstance(mmap_or_mapname, str):
            # This yanks all units off of the map.
            yield think_fn_wrappers.btloadmap(p, self.map_dbref, mmap_or_mapname)
            self.map_width, self.map_height = yield get_map_dimensions(
                p, self.map_dbref)
        else:
            yield self._populate_arena_map_from_memory(mmap_or_mapname)
            self.map_width, self.map_height = mmap_or_mapname.dimensions
        # Now we'll put all of the units back on the map.
        for unit in self.unit_store.list_all_units():
            # NOTE(review): '/' appears to rely on Python 2 integer division
            # to compute the center hex -- use '//' if ported to Python 3.
            yield think_fn_wrappers.btsetxy(
                p, unit.dbref, self.map_dbref,
                self.map_width / 2, self.map_height / 2)
            if unit.pilot_dbref:
                restore_mechprefs_on_unit(p, unit)
                mux_commands.force(p, unit.pilot_dbref, 'startup')
        # And reload the staging and puppet OLs.
        yield self.reload_observers()

    @inlineCallbacks
    def _populate_arena_map_from_memory(self, mmap):
        """
        Given a MuxMap instance, populate the arena's map from it.

        :param MuxMap mmap: The in-memory map instance containing all of
            the terrain/elevation data.
        """
        p = self.protocol
        map_name = '%sx%s' % mmap.dimensions
        # This yanks all units off of the map.
        yield think_fn_wrappers.btloadmap(p, self.map_dbref, map_name)
        # Feed terrain in via btsetmaphex() a whole line at a time.
        for y in range(0, mmap.get_map_height()):
            yield think_fn_wrappers.btsetmaphex_line(
                p, self.map_dbref, y,
                mmap.terrain_list[y], mmap.elevation_list[y])

    def clear_all_powerups(self):
        """
        Clears all powerups off the map.
        """
        for powerup in self.unit_store.list_powerup_units():
            mux_commands.trigger(self.protocol, powerup.dbref, 'DESTMECH.T')

    @inlineCallbacks
    def reload_observers(self):
        """
        Reloads the observation lounges. This is currently only Staging
        and the Puppet OL.
        """
        p = self.protocol
        map_width, map_height = yield get_map_dimensions(p, self.map_dbref)
        # Re-center each observation lounge and restart its display.
        for ol_dbref in [self.staging_dbref, self.puppet_ol_dbref]:
            yield think_fn_wrappers.btsetxy(
                p, ol_dbref, self.map_dbref, map_width / 2, map_height / 2)
            mux_commands.force(p, ol_dbref, 'startup ov')

    #
    ## Begin event handling
    #

    def handle_unit_change(self, old_unit, new_unit, changes):
        """
        This gets called by the unit store whenever a unit's state changes.
        We can react strategically.

        :param ArenaMapUnit old_unit: The old version of the unit in the
            store. This doesn't have the new changes that were picked up.
        :param ArenaMapUnit new_unit: The new unit instance generated from
            polling the units on the map. The store will copy over the
            changed attributes from this instance to ``old_unit`` after this
            handler runs.
        :param list changes: A list of attribute names that changed on
            the ``new_unit`` compared to ``old_unit``.

        Sub-classes must override this.
        """
        raise NotImplementedError("Implement handle_unit_change()")

    @inlineCallbacks
    def handle_unit_destruction(self, victim_unit, killer_unit):
        """
        Triggered when a unit is destroyed. Human, AI, or otherwise.

        :type victim_unit: ArenaMapUnit or None
        :param victim_unit: The unit who was killed.
        :type killer_unit: ArenaMapUnit or None
        :param killer_unit: The unit who did the killing.
        """
        if not (victim_unit and killer_unit):
            # TODO: We probably want to handle this somehow. Could have been
            # killed by the environment or @damage.
            return
        yield record_kill(self, victim_unit, killer_unit)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import ast
import json
import socket
import time
from typing import Any, Dict, Iterable, List, Mapping, Optional, Union
from urllib.error import HTTPError, URLError
import jenkins
from jenkins import Jenkins, JenkinsException
from requests import Request
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.jenkins.hooks.jenkins import JenkinsHook
# Type aliases used throughout this module.
# JenkinsRequest: the dict shape returned by jenkins_request_with_headers(),
# i.e. {'body': str, 'headers': <headers mapping>}.
JenkinsRequest = Mapping[str, Any]
# ParamType: accepted forms of the job 'parameters' argument.
ParamType = Optional[Union[str, Dict, List]]
def jenkins_request_with_headers(jenkins_server: Jenkins, req: Request) -> Optional[JenkinsRequest]:
    """
    Execute a request against Jenkins, returning body AND headers.

    We need to get the headers in addition to the body answer
    to get the location from them.
    This function uses the jenkins_request method from the python-jenkins
    library with just the return call changed.

    :param jenkins_server: The server to query
    :param req: The request to execute
    :return: Dict containing the response body (key body)
        and the headers coming along (headers)
    :raises JenkinsException: on 401/403/500 responses or URL errors
    :raises jenkins.NotFoundException: on 404 responses
    :raises jenkins.TimeoutException: on socket timeout
    :raises jenkins.EmptyResponseException: when the server returns no body
    """
    try:
        response = jenkins_server.jenkins_request(req)
        response_body = response.content
        response_headers = response.headers
        if response_body is None:
            raise jenkins.EmptyResponseException(
                f"Error communicating with server[{jenkins_server.server}]: empty response"
            )
        return {'body': response_body.decode('utf-8'), 'headers': response_headers}
    except HTTPError as e:
        # Jenkins's funky authentication means its nigh impossible to distinguish errors.
        if e.code in [401, 403, 500]:
            raise JenkinsException(
                f'Error in request. Possibly authentication failed [{e.code}]: {e.reason}'
            ) from e
        if e.code == 404:
            raise jenkins.NotFoundException('Requested item could not be found') from e
        raise
    except socket.timeout as e:
        raise jenkins.TimeoutException(f'Error in request: {e}') from e
    except URLError as e:
        raise JenkinsException(f'Error in request: {e.reason}') from e
    # Dead 'return None' removed: every path above either returns or raises.
class JenkinsJobTriggerOperator(BaseOperator):
    """
    Trigger a Jenkins Job and monitor its execution.

    This operator depends on the python-jenkins library,
    version >= 0.4.15 to communicate with the jenkins server.
    You'll also need to configure a Jenkins connection in the connections screen.

    :param jenkins_connection_id: The jenkins connection to use for this job
    :type jenkins_connection_id: str
    :param job_name: The name of the job to trigger
    :type job_name: str
    :param parameters: The parameters block provided to jenkins for use in
        the API call when triggering a build. (templated)
    :type parameters: str, Dict, or List
    :param sleep_time: How long will the operator sleep between each status
        request for the job (min 1, default 10)
    :type sleep_time: int
    :param max_try_before_job_appears: The maximum number of requests to make
        while waiting for the job to appear on the jenkins server (default 10)
    :type max_try_before_job_appears: int
    :param allowed_jenkins_states: Iterable of allowed result jenkins states, default is ``['SUCCESS']``
    :type allowed_jenkins_states: Optional[Iterable[str]]
    """

    # 'parameters' is templated and may be rendered from a .json file.
    template_fields = ('parameters',)
    template_ext = ('.json',)
    ui_color = '#f9ec86'

    def __init__(
        self,
        *,
        jenkins_connection_id: str,
        job_name: str,
        parameters: ParamType = "",
        sleep_time: int = 10,
        max_try_before_job_appears: int = 10,
        allowed_jenkins_states: Optional[Iterable[str]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.job_name = job_name
        self.parameters = parameters
        # Enforce the documented minimum polling interval of 1 second.
        self.sleep_time = max(sleep_time, 1)
        self.jenkins_connection_id = jenkins_connection_id
        self.max_try_before_job_appears = max_try_before_job_appears
        self.allowed_jenkins_states = list(allowed_jenkins_states) if allowed_jenkins_states else ['SUCCESS']

    def build_job(self, jenkins_server: Jenkins, params: ParamType = "") -> Optional[JenkinsRequest]:
        """
        This function makes an API call to Jenkins to trigger a build for 'job_name'.
        It returns a dict with 2 keys : body and headers.
        headers contains also a dict-like object which can be queried to get
        the location to poll in the queue.

        :param jenkins_server: The jenkins server where the job should be triggered
        :param params: The parameters block to provide to jenkins API call.
        :return: Dict containing the response body (key body)
            and the headers coming along (headers)
        """
        # Since params can be either JSON string, dictionary, or list,
        # check type and pass to build_job_url
        if params and isinstance(params, str):
            params = ast.literal_eval(params)
        # We need a None to call the non-parametrized jenkins api end point
        if not params:
            params = None
        request = Request(method='POST', url=jenkins_server.build_job_url(self.job_name, params, None))
        return jenkins_request_with_headers(jenkins_server, request)

    def poll_job_in_queue(self, location: str, jenkins_server: Jenkins) -> int:
        """
        This method polls the jenkins queue until the job is executed.
        When we trigger a job through an API call,
        the job is first put in the queue without having a build number assigned.
        Thus we have to wait for the job to exit the queue to know its build number.
        To do so, we have to add /api/json (or /api/xml) to the location
        returned by the build_job call and poll this file.
        When an 'executable' block appears in the json, it means the job execution started
        and the field 'number' then contains the build number.

        :param location: Location to poll, returned in the header of the build_job call
        :param jenkins_server: The jenkins server to poll
        :return: The build_number corresponding to the triggered job
        :raises AirflowException: if the job never leaves the queue within
            max_try_before_job_appears polls
        """
        try_count = 0
        location += '/api/json'
        # TODO Use get_queue_info instead
        # once it will be available in python-jenkins (v > 0.4.15)
        self.log.info('Polling jenkins queue at the url %s', location)
        while try_count < self.max_try_before_job_appears:
            location_answer = jenkins_request_with_headers(
                jenkins_server, Request(method='POST', url=location)
            )
            if location_answer is not None:
                json_response = json.loads(location_answer['body'])
                # 'executable' appears once the job leaves the queue.
                if 'executable' in json_response and 'number' in json_response['executable']:
                    build_number = json_response['executable']['number']
                    self.log.info('Job executed on Jenkins side with the build number %s', build_number)
                    return build_number
            try_count += 1
            time.sleep(self.sleep_time)
        raise AirflowException(
            "The job hasn't been executed after polling " f"the queue {self.max_try_before_job_appears} times"
        )

    def get_hook(self) -> JenkinsHook:
        """Instantiate jenkins hook"""
        return JenkinsHook(self.jenkins_connection_id)

    def execute(self, context: Mapping[Any, Any]) -> Optional[str]:
        """
        Trigger the job, wait for it to leave the queue, then poll until a
        result is available. Returns the build URL (for e.g. artifact
        retrieval) or None if no build info was obtained.
        """
        if not self.jenkins_connection_id:
            self.log.error(
                'Please specify the jenkins connection id to use.'
                'You must create a Jenkins connection before'
                ' being able to use this operator'
            )
            raise AirflowException(
                'The jenkins_connection_id parameter is missing, impossible to trigger the job'
            )
        if not self.job_name:
            self.log.error("Please specify the job name to use in the job_name parameter")
            raise AirflowException('The job_name parameter is missing,impossible to trigger the job')
        self.log.info(
            'Triggering the job %s on the jenkins : %s with the parameters : %s',
            self.job_name,
            self.jenkins_connection_id,
            self.parameters,
        )
        jenkins_server = self.get_hook().get_jenkins_server()
        jenkins_response = self.build_job(jenkins_server, self.parameters)
        if jenkins_response:
            # The Location header points at the queue item for the new build.
            build_number = self.poll_job_in_queue(jenkins_response['headers']['Location'], jenkins_server)
        time.sleep(self.sleep_time)
        keep_polling_job = True
        build_info = None
        # Poll the build until Jenkins reports a final result.
        while keep_polling_job:
            try:
                build_info = jenkins_server.get_build_info(name=self.job_name, number=build_number)
                if build_info['result'] is not None:
                    keep_polling_job = False
                    # Check if job ended with not allowed state.
                    if build_info['result'] not in self.allowed_jenkins_states:
                        raise AirflowException(
                            f"Jenkins job failed, final state : {build_info['result']}. "
                            f"Find more information on job url : {build_info['url']}"
                        )
                else:
                    self.log.info('Waiting for job to complete : %s , build %s', self.job_name, build_number)
                    time.sleep(self.sleep_time)
            except jenkins.NotFoundException as err:
                raise AirflowException(f'Jenkins job status check failed. Final error was: {err.resp.status}')
            except jenkins.JenkinsException as err:
                raise AirflowException(
                    f'Jenkins call failed with error : {err}, if you have parameters '
                    'double check them, jenkins sends back '
                    'this exception for unknown parameters'
                    'You can also check logs for more details on this exception '
                    '(jenkins_url/log/rss)'
                )
        if build_info:
            # If we can we return the url of the job
            # for later use (like retrieving an artifact)
            return build_info['url']
        return None
| |
import re
import logging
from unittest import TestCase
from testfixtures import LogCapture
from scrapy.http import Response, Request
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler
from scrapy.exceptions import NotConfigured
from scrapy.downloadermiddlewares.cookies import CookiesMiddleware
class CookiesMiddlewareTest(TestCase):
def assertCookieValEqual(self, first, second, msg=None):
cookievaleq = lambda cv: re.split(';\s*', cv.decode('latin1'))
return self.assertEqual(
sorted(cookievaleq(first)),
sorted(cookievaleq(second)), msg)
def setUp(self):
self.spider = Spider('foo')
self.mw = CookiesMiddleware()
def tearDown(self):
del self.mw
def test_basic(self):
req = Request('http://scrapytest.org/')
assert self.mw.process_request(req, self.spider) is None
assert 'Cookie' not in req.headers
headers = {'Set-Cookie': 'C1=value1; path=/'}
res = Response('http://scrapytest.org/', headers=headers)
assert self.mw.process_response(req, res, self.spider) is res
req2 = Request('http://scrapytest.org/sub1/')
assert self.mw.process_request(req2, self.spider) is None
self.assertEqual(req2.headers.get('Cookie'), b"C1=value1")
def test_setting_false_cookies_enabled(self):
self.assertRaises(
NotConfigured,
CookiesMiddleware.from_crawler,
get_crawler(settings_dict={'COOKIES_ENABLED': False})
)
def test_setting_default_cookies_enabled(self):
self.assertIsInstance(
CookiesMiddleware.from_crawler(get_crawler()),
CookiesMiddleware
)
def test_setting_true_cookies_enabled(self):
self.assertIsInstance(
CookiesMiddleware.from_crawler(
get_crawler(settings_dict={'COOKIES_ENABLED': True})
),
CookiesMiddleware
)
def test_setting_enabled_cookies_debug(self):
crawler = get_crawler(settings_dict={'COOKIES_DEBUG': True})
mw = CookiesMiddleware.from_crawler(crawler)
with LogCapture('scrapy.downloadermiddlewares.cookies',
propagate=False,
level=logging.DEBUG) as l:
req = Request('http://scrapytest.org/')
res = Response('http://scrapytest.org/',
headers={'Set-Cookie': 'C1=value1; path=/'})
mw.process_response(req, res, crawler.spider)
req2 = Request('http://scrapytest.org/sub1/')
mw.process_request(req2, crawler.spider)
l.check(
('scrapy.downloadermiddlewares.cookies',
'DEBUG',
'Received cookies from: <200 http://scrapytest.org/>\n'
'Set-Cookie: C1=value1; path=/\n'),
('scrapy.downloadermiddlewares.cookies',
'DEBUG',
'Sending cookies to: <GET http://scrapytest.org/sub1/>\n'
'Cookie: C1=value1\n'),
)
def test_setting_disabled_cookies_debug(self):
crawler = get_crawler(settings_dict={'COOKIES_DEBUG': False})
mw = CookiesMiddleware.from_crawler(crawler)
with LogCapture('scrapy.downloadermiddlewares.cookies',
propagate=False,
level=logging.DEBUG) as l:
req = Request('http://scrapytest.org/')
res = Response('http://scrapytest.org/',
headers={'Set-Cookie': 'C1=value1; path=/'})
mw.process_response(req, res, crawler.spider)
req2 = Request('http://scrapytest.org/sub1/')
mw.process_request(req2, crawler.spider)
l.check()
def test_do_not_break_on_non_utf8_header(self):
req = Request('http://scrapytest.org/')
assert self.mw.process_request(req, self.spider) is None
assert 'Cookie' not in req.headers
headers = {'Set-Cookie': b'C1=in\xa3valid; path=/',
'Other': b'ignore\xa3me'}
res = Response('http://scrapytest.org/', headers=headers)
assert self.mw.process_response(req, res, self.spider) is res
req2 = Request('http://scrapytest.org/sub1/')
assert self.mw.process_request(req2, self.spider) is None
self.assertIn('Cookie', req2.headers)
def test_dont_merge_cookies(self):
# merge some cookies into jar
headers = {'Set-Cookie': 'C1=value1; path=/'}
req = Request('http://scrapytest.org/')
res = Response('http://scrapytest.org/', headers=headers)
assert self.mw.process_response(req, res, self.spider) is res
# test Cookie header is not seted to request
req = Request('http://scrapytest.org/dontmerge', meta={'dont_merge_cookies': 1})
assert self.mw.process_request(req, self.spider) is None
assert 'Cookie' not in req.headers
# check that returned cookies are not merged back to jar
res = Response('http://scrapytest.org/dontmerge', headers={'Set-Cookie': 'dont=mergeme; path=/'})
assert self.mw.process_response(req, res, self.spider) is res
# check that cookies are merged back
req = Request('http://scrapytest.org/mergeme')
assert self.mw.process_request(req, self.spider) is None
self.assertEqual(req.headers.get('Cookie'), b'C1=value1')
# check that cookies are merged when dont_merge_cookies is passed as 0
req = Request('http://scrapytest.org/mergeme', meta={'dont_merge_cookies': 0})
assert self.mw.process_request(req, self.spider) is None
self.assertEqual(req.headers.get('Cookie'), b'C1=value1')
def test_complex_cookies(self):
# merge some cookies into jar
cookies = [{'name': 'C1', 'value': 'value1', 'path': '/foo', 'domain': 'scrapytest.org'},
{'name': 'C2', 'value': 'value2', 'path': '/bar', 'domain': 'scrapytest.org'},
{'name': 'C3', 'value': 'value3', 'path': '/foo', 'domain': 'scrapytest.org'},
{'name': 'C4', 'value': 'value4', 'path': '/foo', 'domain': 'scrapy.org'}]
req = Request('http://scrapytest.org/', cookies=cookies)
self.mw.process_request(req, self.spider)
# embed C1 and C3 for scrapytest.org/foo
req = Request('http://scrapytest.org/foo')
self.mw.process_request(req, self.spider)
assert req.headers.get('Cookie') in (b'C1=value1; C3=value3', b'C3=value3; C1=value1')
# embed C2 for scrapytest.org/bar
req = Request('http://scrapytest.org/bar')
self.mw.process_request(req, self.spider)
self.assertEqual(req.headers.get('Cookie'), b'C2=value2')
# embed nothing for scrapytest.org/baz
req = Request('http://scrapytest.org/baz')
self.mw.process_request(req, self.spider)
assert 'Cookie' not in req.headers
def test_merge_request_cookies(self):
req = Request('http://scrapytest.org/', cookies={'galleta': 'salada'})
assert self.mw.process_request(req, self.spider) is None
self.assertEqual(req.headers.get('Cookie'), b'galleta=salada')
headers = {'Set-Cookie': 'C1=value1; path=/'}
res = Response('http://scrapytest.org/', headers=headers)
assert self.mw.process_response(req, res, self.spider) is res
req2 = Request('http://scrapytest.org/sub1/')
assert self.mw.process_request(req2, self.spider) is None
self.assertCookieValEqual(req2.headers.get('Cookie'), b"C1=value1; galleta=salada")
def test_cookiejar_key(self):
    """Requests carrying a ``cookiejar`` meta key must each use their own
    isolated cookie jar, keyed by that value; requests without the key use
    the default jar, and non-HTTP requests bypass cookies entirely."""
    # Jar "store1": seed a request cookie, then receive one from the server.
    req = Request('http://scrapytest.org/', cookies={'galleta': 'salada'}, meta={'cookiejar': "store1"})
    assert self.mw.process_request(req, self.spider) is None
    self.assertEqual(req.headers.get('Cookie'), b'galleta=salada')
    headers = {'Set-Cookie': 'C1=value1; path=/'}
    res = Response('http://scrapytest.org/', headers=headers, request=req)
    assert self.mw.process_response(req, res, self.spider) is res
    # Follow-up inherits meta (and therefore the jar key) from the response.
    req2 = Request('http://scrapytest.org/', meta=res.meta)
    assert self.mw.process_request(req2, self.spider) is None
    self.assertCookieValEqual(req2.headers.get('Cookie'), b'C1=value1; galleta=salada')
    # Jar "store2" is independent: it never sees store1's cookies.
    req3 = Request('http://scrapytest.org/', cookies={'galleta': 'dulce'}, meta={'cookiejar': "store2"})
    assert self.mw.process_request(req3, self.spider) is None
    self.assertEqual(req3.headers.get('Cookie'), b'galleta=dulce')
    headers = {'Set-Cookie': 'C2=value2; path=/'}
    res2 = Response('http://scrapytest.org/', headers=headers, request=req3)
    assert self.mw.process_response(req3, res2, self.spider) is res2
    req4 = Request('http://scrapytest.org/', meta=res2.meta)
    assert self.mw.process_request(req4, self.spider) is None
    self.assertCookieValEqual(req4.headers.get('Cookie'), b'C2=value2; galleta=dulce')
    # Cookies set by a host carrying an explicit port must also match the
    # same host both with and without the port.
    req5_1 = Request('http://scrapytest.org:1104/')
    assert self.mw.process_request(req5_1, self.spider) is None
    headers = {'Set-Cookie': 'C1=value1; path=/'}
    res5_1 = Response('http://scrapytest.org:1104/', headers=headers, request=req5_1)
    assert self.mw.process_response(req5_1, res5_1, self.spider) is res5_1
    req5_2 = Request('http://scrapytest.org:1104/some-redirected-path')
    assert self.mw.process_request(req5_2, self.spider) is None
    self.assertEqual(req5_2.headers.get('Cookie'), b'C1=value1')
    req5_3 = Request('http://scrapytest.org/some-redirected-path')
    assert self.mw.process_request(req5_3, self.spider) is None
    self.assertEqual(req5_3.headers.get('Cookie'), b'C1=value1')
    # Non-HTTP schemes must skip cookie retrieval entirely.
    req6 = Request('file:///scrapy/sometempfile')
    assert self.mw.process_request(req6, self.spider) is None
    self.assertEqual(req6.headers.get('Cookie'), None)
def test_local_domain(self):
    """Cookies must round-trip for bare, single-label hostnames too."""
    # "example-host" has no dots, so domain-matching edge cases apply; the
    # middleware must still attach the cookie to the outgoing request.
    req = Request("http://example-host/", cookies={'currencyCookie': 'USD'})
    assert self.mw.process_request(req, self.spider) is None
    self.assertIn('Cookie', req.headers)
    self.assertEqual(b'currencyCookie=USD', req.headers['Cookie'])
| |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South schema migration for the ``catalogue`` app.

    Creates every catalogue table (products, product classes, categories,
    attributes, options, contributors, images) plus the M2M join tables and
    the ProductImage uniqueness constraint. Auto-generated by South: the
    table definitions must stay in sync with the frozen ORM state in
    ``models`` below, so avoid hand-editing beyond comments.
    """

    def forwards(self, orm):
        """Apply the migration: create all catalogue tables and constraints."""
        # Adding model 'ProductRecommendation'
        db.create_table('catalogue_productrecommendation', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('primary', self.gf('django.db.models.fields.related.ForeignKey')(related_name='primary_recommendations', to=orm['catalogue.Product'])),
            ('recommendation', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalogue.Product'])),
            ('ranking', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
        ))
        db.send_create_signal('catalogue', ['ProductRecommendation'])

        # Adding model 'ProductClass'
        db.create_table('catalogue_productclass', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=128, db_index=True)),
        ))
        db.send_create_signal('catalogue', ['ProductClass'])

        # Adding M2M table for field options on 'ProductClass'
        db.create_table('catalogue_productclass_options', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('productclass', models.ForeignKey(orm['catalogue.productclass'], null=False)),
            ('option', models.ForeignKey(orm['catalogue.option'], null=False))
        ))
        db.create_unique('catalogue_productclass_options', ['productclass_id', 'option_id'])

        # Adding model 'Category'
        db.create_table('catalogue_category', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('path', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
            ('depth', self.gf('django.db.models.fields.PositiveIntegerField')()),
            ('numchild', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
            ('slug', self.gf('django.db.models.fields.SlugField')(max_length=255, db_index=True)),
            ('full_name', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
        ))
        db.send_create_signal('catalogue', ['Category'])

        # Adding model 'ProductCategory'
        db.create_table('catalogue_productcategory', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('product', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalogue.Product'])),
            ('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalogue.Category'])),
            ('is_canonical', self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True)),
        ))
        db.send_create_signal('catalogue', ['ProductCategory'])

        # Adding model 'Product'
        db.create_table('catalogue_product', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('upc', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=64, null=True, blank=True)),
            ('parent', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='variants', null=True, to=orm['catalogue.Product'])),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('slug', self.gf('django.db.models.fields.SlugField')(max_length=255, db_index=True)),
            ('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('product_class', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalogue.ProductClass'], null=True)),
            ('score', self.gf('django.db.models.fields.FloatField')(default=0.0, db_index=True)),
            ('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('date_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, db_index=True, blank=True)),
        ))
        db.send_create_signal('catalogue', ['Product'])

        # Adding M2M table for field product_options on 'Product'
        db.create_table('catalogue_product_product_options', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('product', models.ForeignKey(orm['catalogue.product'], null=False)),
            ('option', models.ForeignKey(orm['catalogue.option'], null=False))
        ))
        db.create_unique('catalogue_product_product_options', ['product_id', 'option_id'])

        # Adding M2M table for field related_products on 'Product'
        db.create_table('catalogue_product_related_products', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('from_product', models.ForeignKey(orm['catalogue.product'], null=False)),
            ('to_product', models.ForeignKey(orm['catalogue.product'], null=False))
        ))
        db.create_unique('catalogue_product_related_products', ['from_product_id', 'to_product_id'])

        # Adding model 'ContributorRole'
        db.create_table('catalogue_contributorrole', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('name_plural', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, db_index=True)),
        ))
        db.send_create_signal('catalogue', ['ContributorRole'])

        # Adding model 'Contributor'
        db.create_table('catalogue_contributor', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('slug', self.gf('django.db.models.fields.SlugField')(max_length=255, db_index=True)),
        ))
        db.send_create_signal('catalogue', ['Contributor'])

        # Adding model 'ProductContributor'
        db.create_table('catalogue_productcontributor', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('product', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalogue.Product'])),
            ('contributor', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalogue.Contributor'])),
            ('role', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalogue.ContributorRole'])),
        ))
        db.send_create_signal('catalogue', ['ProductContributor'])

        # Adding model 'ProductAttribute'
        db.create_table('catalogue_productattribute', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('product_class', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='attributes', null=True, to=orm['catalogue.ProductClass'])),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('code', self.gf('django.db.models.fields.SlugField')(max_length=128, db_index=True)),
            ('type', self.gf('django.db.models.fields.CharField')(default='text', max_length=20)),
            ('option_group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalogue.AttributeOptionGroup'], null=True, blank=True)),
            ('entity_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalogue.AttributeEntityType'], null=True, blank=True)),
            ('required', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('catalogue', ['ProductAttribute'])

        # Adding model 'ProductAttributeValue'
        db.create_table('catalogue_productattributevalue', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('attribute', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalogue.ProductAttribute'])),
            ('product', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalogue.Product'])),
            ('value_text', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('value_integer', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('value_boolean', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('value_float', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
            ('value_richtext', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('value_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
            ('value_option', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalogue.AttributeOption'], null=True, blank=True)),
            ('value_entity', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalogue.AttributeEntity'], null=True, blank=True)),
        ))
        db.send_create_signal('catalogue', ['ProductAttributeValue'])

        # Adding model 'AttributeOptionGroup'
        db.create_table('catalogue_attributeoptiongroup', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
        ))
        db.send_create_signal('catalogue', ['AttributeOptionGroup'])

        # Adding model 'AttributeOption'
        db.create_table('catalogue_attributeoption', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('group', self.gf('django.db.models.fields.related.ForeignKey')(related_name='options', to=orm['catalogue.AttributeOptionGroup'])),
            ('option', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ))
        db.send_create_signal('catalogue', ['AttributeOption'])

        # Adding model 'AttributeEntity'
        db.create_table('catalogue_attributeentity', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('slug', self.gf('django.db.models.fields.SlugField')(db_index=True, max_length=255, blank=True)),
            ('type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='entities', to=orm['catalogue.AttributeEntityType'])),
        ))
        db.send_create_signal('catalogue', ['AttributeEntity'])

        # Adding model 'AttributeEntityType'
        db.create_table('catalogue_attributeentitytype', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('slug', self.gf('django.db.models.fields.SlugField')(db_index=True, max_length=255, blank=True)),
        ))
        db.send_create_signal('catalogue', ['AttributeEntityType'])

        # Adding model 'Option'
        db.create_table('catalogue_option', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('code', self.gf('django.db.models.fields.SlugField')(max_length=128, db_index=True)),
            ('type', self.gf('django.db.models.fields.CharField')(default='Required', max_length=128)),
        ))
        db.send_create_signal('catalogue', ['Option'])

        # Adding model 'ProductImage'
        db.create_table('catalogue_productimage', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('product', self.gf('django.db.models.fields.related.ForeignKey')(related_name='images', to=orm['catalogue.Product'])),
            ('original', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
            ('caption', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
            ('display_order', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
            ('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal('catalogue', ['ProductImage'])

        # Adding unique constraint on 'ProductImage', fields ['product', 'display_order']
        db.create_unique('catalogue_productimage', ['product_id', 'display_order'])

    def backwards(self, orm):
        """Reverse the migration: drop every table created by forwards().

        The unique constraint is removed first, before its table is dropped.
        """
        # Removing unique constraint on 'ProductImage', fields ['product', 'display_order']
        db.delete_unique('catalogue_productimage', ['product_id', 'display_order'])

        # Deleting model 'ProductRecommendation'
        db.delete_table('catalogue_productrecommendation')

        # Deleting model 'ProductClass'
        db.delete_table('catalogue_productclass')

        # Removing M2M table for field options on 'ProductClass'
        db.delete_table('catalogue_productclass_options')

        # Deleting model 'Category'
        db.delete_table('catalogue_category')

        # Deleting model 'ProductCategory'
        db.delete_table('catalogue_productcategory')

        # Deleting model 'Product'
        db.delete_table('catalogue_product')

        # Removing M2M table for field product_options on 'Product'
        db.delete_table('catalogue_product_product_options')

        # Removing M2M table for field related_products on 'Product'
        db.delete_table('catalogue_product_related_products')

        # Deleting model 'ContributorRole'
        db.delete_table('catalogue_contributorrole')

        # Deleting model 'Contributor'
        db.delete_table('catalogue_contributor')

        # Deleting model 'ProductContributor'
        db.delete_table('catalogue_productcontributor')

        # Deleting model 'ProductAttribute'
        db.delete_table('catalogue_productattribute')

        # Deleting model 'ProductAttributeValue'
        db.delete_table('catalogue_productattributevalue')

        # Deleting model 'AttributeOptionGroup'
        db.delete_table('catalogue_attributeoptiongroup')

        # Deleting model 'AttributeOption'
        db.delete_table('catalogue_attributeoption')

        # Deleting model 'AttributeEntity'
        db.delete_table('catalogue_attributeentity')

        # Deleting model 'AttributeEntityType'
        db.delete_table('catalogue_attributeentitytype')

        # Deleting model 'Option'
        db.delete_table('catalogue_option')

        # Deleting model 'ProductImage'
        db.delete_table('catalogue_productimage')

    # Frozen ORM state used by South to reconstruct the models as they
    # existed at the time of this migration.
    models = {
        'catalogue.attributeentity': {
            'Meta': {'object_name': 'AttributeEntity'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': "orm['catalogue.AttributeEntityType']"})
        },
        'catalogue.attributeentitytype': {
            'Meta': {'object_name': 'AttributeEntityType'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'})
        },
        'catalogue.attributeoption': {
            'Meta': {'object_name': 'AttributeOption'},
            'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['catalogue.AttributeOptionGroup']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'catalogue.attributeoptiongroup': {
            'Meta': {'object_name': 'AttributeOptionGroup'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        'catalogue.category': {
            'Meta': {'ordering': "['name']", 'object_name': 'Category'},
            'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'})
        },
        'catalogue.contributor': {
            'Meta': {'object_name': 'Contributor'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'})
        },
        'catalogue.contributorrole': {
            'Meta': {'object_name': 'ContributorRole'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'name_plural': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
        },
        'catalogue.option': {
            'Meta': {'object_name': 'Option'},
            'code': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
        },
        'catalogue.product': {
            'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
            'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.ProductAttribute']", 'through': "orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Category']", 'through': "orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': "orm['catalogue.Product']"}),
            'product_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductClass']", 'null': 'True'}),
            'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
            'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Product']", 'symmetrical': 'False', 'through': "orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
            'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
            'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'upc': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
        },
        'catalogue.productattribute': {
            'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
            'code': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
            'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
            'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': "orm['catalogue.ProductClass']"}),
            'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
        },
        'catalogue.productattributevalue': {
            'Meta': {'object_name': 'ProductAttributeValue'},
            'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductAttribute']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"}),
            'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
            'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
            'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        },
        'catalogue.productcategory': {
            'Meta': {'ordering': "['-is_canonical']", 'object_name': 'ProductCategory'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Category']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
        },
        'catalogue.productclass': {
            'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'})
        },
        'catalogue.productcontributor': {
            'Meta': {'object_name': 'ProductContributor'},
            'contributor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Contributor']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"}),
            'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ContributorRole']"})
        },
        'catalogue.productimage': {
            'Meta': {'ordering': "['display_order']", 'unique_together': "(('product', 'display_order'),)", 'object_name': 'ProductImage'},
            'caption': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'display_order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'original': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['catalogue.Product']"})
        },
        'catalogue.productrecommendation': {
            'Meta': {'object_name': 'ProductRecommendation'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': "orm['catalogue.Product']"}),
            'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
            'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
        }
    }

    complete_apps = ['catalogue']
| |
import copy
import datetime
from django.conf import settings
from django.core.exceptions import FieldError
from django.db.backends import utils as backend_utils
from django.db.models import fields
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import Q, refs_aggregate
from django.utils import six, timezone
from django.utils.functional import cached_property
class Combinable(object):
    """
    Mixin that lets objects (e.g. F() column references) be combined with
    one another, or with plain Python values, via arithmetic and bitwise
    connectors: ``F('foo') + F('bar')``.
    """
    # Arithmetic connectors
    ADD = '+'
    SUB = '-'
    MUL = '*'
    DIV = '/'
    POW = '^'
    # The modulo operator is doubled ('%%') so that it survives the
    # '%'-style parameter substitution applied to the final SQL string.
    MOD = '%%'
    # Bitwise connectors. '&' and '|' themselves are reserved for boolean
    # logic, so these connectors are only reachable via .bitand()/.bitor().
    BITAND = '&'
    BITOR = '|'

    def _combine(self, other, connector, swapped, node=None):
        """Build a CombinedExpression from self and other.

        ``swapped`` is True for reflected operators (e.g. ``2 + F('x')``),
        where self is the right-hand operand.
        """
        if not hasattr(other, 'resolve_expression'):
            # Anything that is not already an expression gets wrapped in a
            # Value(); timedeltas need a DurationValue for correct casting.
            if isinstance(other, datetime.timedelta):
                other = DurationValue(other, output_field=fields.DurationField())
            else:
                other = Value(other)
        lhs, rhs = (other, self) if swapped else (self, other)
        return CombinedExpression(lhs, connector, rhs)

    #############
    # OPERATORS #
    #############

    def __add__(self, other):
        return self._combine(other, self.ADD, False)

    def __radd__(self, other):
        return self._combine(other, self.ADD, True)

    def __sub__(self, other):
        return self._combine(other, self.SUB, False)

    def __rsub__(self, other):
        return self._combine(other, self.SUB, True)

    def __mul__(self, other):
        return self._combine(other, self.MUL, False)

    def __rmul__(self, other):
        return self._combine(other, self.MUL, True)

    def __truediv__(self, other):
        return self._combine(other, self.DIV, False)

    def __rtruediv__(self, other):
        return self._combine(other, self.DIV, True)

    def __div__(self, other):  # Python 2 compatibility
        # Dispatch through the class so subclass overrides are honoured.
        return type(self).__truediv__(self, other)

    def __rdiv__(self, other):  # Python 2 compatibility
        return type(self).__rtruediv__(self, other)

    def __mod__(self, other):
        return self._combine(other, self.MOD, False)

    def __rmod__(self, other):
        return self._combine(other, self.MOD, True)

    def __pow__(self, other):
        return self._combine(other, self.POW, False)

    def __rpow__(self, other):
        return self._combine(other, self.POW, True)

    def bitand(self, other):
        return self._combine(other, self.BITAND, False)

    def bitor(self, other):
        return self._combine(other, self.BITOR, False)

    def __and__(self, other):
        raise NotImplementedError(
            "Use .bitand() and .bitor() for bitwise logical operations."
        )

    def __rand__(self, other):
        raise NotImplementedError(
            "Use .bitand() and .bitor() for bitwise logical operations."
        )

    def __or__(self, other):
        raise NotImplementedError(
            "Use .bitand() and .bitor() for bitwise logical operations."
        )

    def __ror__(self, other):
        raise NotImplementedError(
            "Use .bitand() and .bitor() for bitwise logical operations."
        )
class BaseExpression(object):
    """
    Base class for all query expressions.
    """
    # Flag set on expressions that act as terminal aggregate clauses.
    is_summary = False

    def __init__(self, output_field=None):
        # The caller-supplied output field, if any; otherwise resolved
        # lazily from the source expressions.
        self._output_field = output_field
def get_db_converters(self, connection):
    """Converters applied to values from the database: this expression's
    own converter first, then the output field's."""
    converters = [self.convert_value]
    converters.extend(self.output_field.get_db_converters(connection))
    return converters
def get_source_expressions(self):
    # A leaf expression has no child expressions.
    return []

def set_source_expressions(self, exprs):
    # Leaf expressions accept no children; anything else is a bug.
    assert len(exprs) == 0
def _parse_expressions(self, *expressions):
    """Coerce every argument to an expression: strings become F() column
    references and other non-expressions are wrapped in Value()."""
    parsed = []
    for arg in expressions:
        if hasattr(arg, 'resolve_expression'):
            parsed.append(arg)
        elif isinstance(arg, six.string_types):
            parsed.append(F(arg))
        else:
            parsed.append(Value(arg))
    return parsed
def as_sql(self, compiler, connection):
    """
    Return a (sql, [params]) tuple for inclusion in the current query.

    Backends may substitute their own SQL generation by attaching an
    ``as_{vendor}`` method to the expression class::

        def override_as_sql(self, compiler, connection):
            # custom logic
            return super(Expression, self).as_sql(compiler, connection)
        setattr(Expression, 'as_' + connection.vendor, override_as_sql)

    Arguments:
    * compiler: the query compiler responsible for generating the query;
      has a compile method returning a (sql, [params]) tuple, and calling
      compiler(value) returns a quoted `value`.
    * connection: the database connection used for the current query.

    The returned ``sql`` string contains ordered placeholders to be
    replaced with the elements of ``params``.
    """
    raise NotImplementedError("Subclasses must implement as_sql()")
@cached_property
def contains_aggregate(self):
    """True if any subexpression is, or contains, an aggregate."""
    return any(expr and expr.contains_aggregate
               for expr in self.get_source_expressions())

@cached_property
def contains_column_references(self):
    """True if any subexpression references a database column."""
    return any(expr and expr.contains_column_references
               for expr in self.get_source_expressions())
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
    """
    Hook for preprocessing/validation before this expression is added
    to a query.

    Arguments:
    * query: the backend query implementation
    * allow_joins: whether joins may be used in this query
    * reuse: a set of reusable joins for multijoins
    * summarize: whether this is a terminal aggregate clause
    * for_save: whether this expression is about to be used in a save
      or update

    Returns a resolved copy of this Expression, ready to be added to
    the query.
    """
    clone = self.copy()
    clone.is_summary = summarize
    resolved_children = [
        child.resolve_expression(query, allow_joins, reuse, summarize)
        for child in clone.get_source_expressions()
    ]
    clone.set_source_expressions(resolved_children)
    return clone
def _prepare(self, field):
    """
    Hook used by Field.get_prep_lookup() to do custom preparation.
    """
    # Default implementation: no preparation required.
    return self
@property
def field(self):
    # Convenience alias for output_field.
    return self.output_field

@cached_property
def output_field(self):
    """
    Return the output type of this expression; raise FieldError when it
    cannot be determined.
    """
    resolved = self._output_field_or_none
    if resolved is None:
        raise FieldError("Cannot resolve expression type, unknown output_field")
    return resolved
@cached_property
def _output_field_or_none(self):
    """
    Like output_field, but yield None rather than raising FieldError
    when no output type can be resolved. (The 'output_field' property
    raises in that case; this attribute deliberately does not.)
    """
    if self._output_field is not None:
        return self._output_field
    # May populate _output_field by inspecting the source expressions.
    self._resolve_output_field()
    return self._output_field
def _resolve_output_field(self):
    """
    Attempt to infer the output type of the expression from its source
    fields. When all sources share a single field type, that type is
    adopted. This is only a convenience for the common case — consider
    `2 + 2` versus `2 / 3` — so mixed source types raise FieldError and
    the user must supply an explicit output_field instead. Sources
    without an `_output_field` are skipped; if every source is None the
    error is raised later, by the `output_field` property.
    """
    if self._output_field is not None:
        return
    for source in self.get_source_fields():
        # Adopt the first field seen; a None source leaves the slot
        # empty so a later non-None field can fill it.
        if self._output_field is None:
            self._output_field = source
        if source is not None and not isinstance(self._output_field, source.__class__):
            raise FieldError(
                "Expression contains mixed types. You must set output_field")
def convert_value(self, value, expression, connection, context):
    """
    Convert a database value according to the output_field. Expressions
    need their own converter because a user-supplied output_field may be
    a different type from what the database actually returns.
    """
    output = self.output_field
    internal_type = output.get_internal_type()
    if value is None:
        return value
    if internal_type == 'FloatField':
        return float(value)
    if internal_type.endswith('IntegerField'):
        return int(value)
    if internal_type == 'DecimalField':
        return backend_utils.typecast_decimal(value)
    return value
def get_lookup(self, lookup):
    """Delegate lookup resolution to this expression's output field."""
    return self.output_field.get_lookup(lookup)
def get_transform(self, name):
    """Delegate transform resolution to this expression's output field."""
    return self.output_field.get_transform(name)
def relabeled_clone(self, change_map):
    """Return a copy of this node whose source expressions have been
    relabeled according to change_map; the original is left untouched."""
    clone = self.copy()
    relabeled = [expr.relabeled_clone(change_map)
                 for expr in self.get_source_expressions()]
    clone.set_source_expressions(relabeled)
    return clone
def copy(self):
    """Return a shallow copy of this expression, flagged as a copy."""
    clone = copy.copy(self)
    clone.copied = True
    return clone
def refs_aggregate(self, existing_aggregates):
    """
    Does this expression contain a reference to some of the
    existing aggregates? If so, returns the aggregate and also
    the lookup parts that *weren't* found. So, if
        existing_aggregates = {'max_id': Max('id')}
        self.name = 'max_id'
        queryset.filter(max_id__range=[10,100])
    then this method will return Max('id') and those parts of the
    name that weren't found. In this case `max_id` is found and the range
    portion is returned as ('range',).
    """
    for child in self.get_source_expressions():
        match, remainder = child.refs_aggregate(existing_aggregates)
        if match:
            return match, remainder
    # No child referenced an existing aggregate.
    return False, ()
def get_group_by_cols(self):
    """Return the columns this expression contributes to GROUP BY.

    A non-aggregate expression groups by itself; an aggregate delegates to
    its source expressions.
    """
    if not self.contains_aggregate:
        return [self]
    return [col
            for source in self.get_source_expressions()
            for col in source.get_group_by_cols()]
def get_source_fields(self):
    """Returns the underlying field types used by this aggregate.

    Sources with no resolvable output type appear as None.
    """
    return [source._output_field_or_none
            for source in self.get_source_expressions()]
def asc(self):
    """Wrap this expression in an ascending OrderBy node."""
    return OrderBy(self)
def desc(self):
    """Wrap this expression in a descending OrderBy node."""
    return OrderBy(self, descending=True)
def reverse_ordering(self):
    """Default no-op; subclasses that carry ordering state (e.g. OrderBy)
    override this to invert their direction."""
    return self
def flatten(self):
    """
    Recursively yield this expression and all subexpressions, in
    depth-first order. Falsy child slots (e.g. None) are skipped.
    """
    yield self
    for child in self.get_source_expressions():
        if not child:
            continue
        for descendant in child.flatten():
            yield descendant
class Expression(BaseExpression, Combinable):
    """
    An expression that can be combined with other expressions.

    Combines BaseExpression's resolution/SQL machinery with Combinable's
    operator support.
    """
    pass
class CombinedExpression(Expression):
    """Two sub-expressions joined by a connector, e.g. F('a') + F('b').

    Arguments:
     * lhs/rhs: the left and right operand expressions.
     * connector: one of the Combinable connector constants ('+', '-', ...).
     * output_field: optional explicit result field type.
    """
    def __init__(self, lhs, connector, rhs, output_field=None):
        super(CombinedExpression, self).__init__(output_field=output_field)
        self.connector = connector
        self.lhs = lhs
        self.rhs = rhs

    def __repr__(self):
        return "<{}: {}>".format(self.__class__.__name__, self)

    def __str__(self):
        return "{} {} {}".format(self.lhs, self.connector, self.rhs)

    def get_source_expressions(self):
        return [self.lhs, self.rhs]

    def set_source_expressions(self, exprs):
        self.lhs, self.rhs = exprs

    def as_sql(self, compiler, connection):
        # An unresolvable side simply opts out of the special-casing below.
        try:
            lhs_output = self.lhs.output_field
        except FieldError:
            lhs_output = None
        try:
            rhs_output = self.rhs.output_field
        except FieldError:
            rhs_output = None
        # Duration arithmetic needs special SQL on backends without a
        # native duration column type.
        if (not connection.features.has_native_duration_field and
                ((lhs_output and lhs_output.get_internal_type() == 'DurationField')
                 or (rhs_output and rhs_output.get_internal_type() == 'DurationField'))):
            return DurationExpression(self.lhs, self.connector, self.rhs).as_sql(compiler, connection)
        # Subtracting two temporal columns of the same type yields a
        # duration. BUG FIX: compare lhs against *rhs* -- the previous code
        # compared lhs_output.get_internal_type() with itself, which is
        # always true, so mismatched temporal types were also routed here.
        if (lhs_output and rhs_output and self.connector == self.SUB and
                lhs_output.get_internal_type() in {'DateField', 'DateTimeField', 'TimeField'} and
                lhs_output.get_internal_type() == rhs_output.get_internal_type()):
            return TemporalSubtraction(self.lhs, self.rhs).as_sql(compiler, connection)
        expressions = []
        expression_params = []
        sql, params = compiler.compile(self.lhs)
        expressions.append(sql)
        expression_params.extend(params)
        sql, params = compiler.compile(self.rhs)
        expressions.append(sql)
        expression_params.extend(params)
        # order of precedence
        expression_wrapper = '(%s)'
        sql = connection.ops.combine_expression(self.connector, expressions)
        return expression_wrapper % sql, expression_params

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        c = self.copy()
        c.is_summary = summarize
        c.lhs = c.lhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        c.rhs = c.rhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        return c
class DurationExpression(CombinedExpression):
    """Duration arithmetic for backends without a native duration type."""

    def compile(self, side, compiler, connection):
        # Literal DurationValues already emit backend-appropriate SQL; only
        # duration-typed expressions need reformatting for arithmetic.
        if not isinstance(side, DurationValue):
            try:
                output = side.output_field
            except FieldError:
                # Unresolvable type: fall through to plain compilation.
                pass
            else:
                if output.get_internal_type() == 'DurationField':
                    sql, params = compiler.compile(side)
                    return connection.ops.format_for_duration_arithmetic(sql), params
        return compiler.compile(side)

    def as_sql(self, compiler, connection):
        connection.ops.check_expression_support(self)
        expressions = []
        expression_params = []
        sql, params = self.compile(self.lhs, compiler, connection)
        expressions.append(sql)
        expression_params.extend(params)
        sql, params = self.compile(self.rhs, compiler, connection)
        expressions.append(sql)
        expression_params.extend(params)
        # order of precedence
        expression_wrapper = '(%s)'
        sql = connection.ops.combine_duration_expression(self.connector, expressions)
        return expression_wrapper % sql, expression_params
class TemporalSubtraction(CombinedExpression):
    """Subtraction of two temporal expressions, yielding a DurationField."""

    def __init__(self, lhs, rhs):
        super(TemporalSubtraction, self).__init__(lhs, self.SUB, rhs, output_field=fields.DurationField())

    def as_sql(self, compiler, connection):
        connection.ops.check_expression_support(self)
        # NOTE(review): compiler.compile() is passed an extra `connection`
        # argument here, unlike everywhere else in this file -- confirm the
        # compiler accepts this signature.
        lhs = compiler.compile(self.lhs, connection)
        rhs = compiler.compile(self.rhs, connection)
        return connection.ops.subtract_temporals(self.lhs.output_field.get_internal_type(), lhs, rhs)
class F(Combinable):
    """
    An object capable of resolving references to existing query objects.
    """

    def __init__(self, name):
        """
        Arguments:
         * name: the name of the field this expression references
        """
        self.name = name

    def __repr__(self):
        return "{}({})".format(type(self).__name__, self.name)

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        # Delegate to the query, which knows how to turn a field name into
        # a column reference.
        return query.resolve_ref(self.name, allow_joins, reuse, summarize)

    def refs_aggregate(self, existing_aggregates):
        parts = self.name.split(LOOKUP_SEP)
        return refs_aggregate(parts, existing_aggregates)

    def asc(self):
        return OrderBy(self)

    def desc(self):
        return OrderBy(self, descending=True)
class Func(Expression):
    """
    A SQL function call.
    """
    # Subclasses set `function` to the SQL function name interpolated into
    # `template`; `arg_joiner` separates the compiled argument SQL.
    function = None
    template = '%(function)s(%(expressions)s)'
    arg_joiner = ', '
    arity = None  # The number of arguments the function accepts.

    def __init__(self, *expressions, **extra):
        # Enforce a fixed argument count when the subclass declares one.
        if self.arity is not None and len(expressions) != self.arity:
            raise TypeError(
                "'%s' takes exactly %s %s (%s given)" % (
                    self.__class__.__name__,
                    self.arity,
                    "argument" if self.arity == 1 else "arguments",
                    len(expressions),
                )
            )
        output_field = extra.pop('output_field', None)
        super(Func, self).__init__(output_field=output_field)
        self.source_expressions = self._parse_expressions(*expressions)
        # Remaining keyword arguments become template substitution context.
        self.extra = extra

    def __repr__(self):
        args = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
        extra = ', '.join(str(key) + '=' + str(val) for key, val in self.extra.items())
        if extra:
            return "{}({}, {})".format(self.__class__.__name__, args, extra)
        return "{}({})".format(self.__class__.__name__, args)

    def get_source_expressions(self):
        return self.source_expressions

    def set_source_expressions(self, exprs):
        self.source_expressions = exprs

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        # Resolve each argument in place on a copy of this node.
        c = self.copy()
        c.is_summary = summarize
        for pos, arg in enumerate(c.source_expressions):
            c.source_expressions[pos] = arg.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        return c

    def as_sql(self, compiler, connection, function=None, template=None):
        # Compile each argument, then interpolate into the template.
        # NOTE(review): this mutates self.extra ('function', 'expressions',
        # 'field') rather than working on a copy -- confirm callers always
        # operate on a resolved copy of the expression.
        connection.ops.check_expression_support(self)
        sql_parts = []
        params = []
        for arg in self.source_expressions:
            arg_sql, arg_params = compiler.compile(arg)
            sql_parts.append(arg_sql)
            params.extend(arg_params)
        if function is None:
            self.extra['function'] = self.extra.get('function', self.function)
        else:
            self.extra['function'] = function
        self.extra['expressions'] = self.extra['field'] = self.arg_joiner.join(sql_parts)
        template = template or self.extra.get('template', self.template)
        return template % self.extra, params

    def as_sqlite(self, compiler, connection):
        # SQLite has no decimal type, so cast decimal results explicitly.
        sql, params = self.as_sql(compiler, connection)
        try:
            if self.output_field.get_internal_type() == 'DecimalField':
                sql = 'CAST(%s AS NUMERIC)' % sql
        except FieldError:
            pass
        return sql, params

    def copy(self):
        # Shallow-copy the argument list and extra dict so the clone can be
        # mutated independently of the original.
        copy = super(Func, self).copy()
        copy.source_expressions = self.source_expressions[:]
        copy.extra = self.extra.copy()
        return copy
class Value(Expression):
    """
    Represents a wrapped value as a node within an expression
    """
    def __init__(self, value, output_field=None):
        """
        Arguments:
        * value: the value this expression represents. The value will be
          added into the sql parameter list and properly quoted.

        * output_field: an instance of the model field type that this
          expression will return, such as IntegerField() or CharField().
        """
        super(Value, self).__init__(output_field=output_field)
        self.value = value

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, self.value)

    def as_sql(self, compiler, connection):
        connection.ops.check_expression_support(self)
        val = self.value
        # check _output_field to avoid triggering an exception
        if self._output_field is not None:
            # NOTE(review): self.for_save is only assigned in
            # resolve_expression(); as_sql appears to assume the node was
            # resolved first -- confirm.
            if self.for_save:
                val = self.output_field.get_db_prep_save(val, connection=connection)
            else:
                val = self.output_field.get_db_prep_value(val, connection=connection)
        if val is None:
            # cx_Oracle does not always convert None to the appropriate
            # NULL type (like in case expressions using numbers), so we
            # use a literal SQL NULL
            return 'NULL', []
        return '%s', [val]

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        c = super(Value, self).resolve_expression(query, allow_joins, reuse, summarize, for_save)
        # Remember whether the value is being prepared for a save so
        # as_sql() can pick the matching db-prep method.
        c.for_save = for_save
        return c

    def get_group_by_cols(self):
        # Literals never contribute to GROUP BY.
        return []
class DurationValue(Value):
    """A literal duration value."""

    def as_sql(self, compiler, connection):
        connection.ops.check_expression_support(self)
        # Pass through as a normal parameter only when the backend both has
        # a native duration column type and its driver accepts timedeltas.
        if (connection.features.has_native_duration_field and
                connection.features.driver_supports_timedelta_args):
            return super(DurationValue, self).as_sql(compiler, connection)
        # Otherwise let the backend render an interval literal.
        return connection.ops.date_interval_sql(self.value)
class RawSQL(Expression):
    """A raw SQL fragment with parameters, usable as an expression node."""

    def __init__(self, sql, params, output_field=None):
        self.sql = sql
        self.params = params
        # Fall back to a generic Field when no output type is given.
        if output_field is None:
            output_field = fields.Field()
        super(RawSQL, self).__init__(output_field=output_field)

    def __repr__(self):
        return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params)

    def as_sql(self, compiler, connection):
        return '(%s)' % self.sql, self.params

    def get_group_by_cols(self):
        return [self]
class Star(Expression):
    """Stand-in expression for a SQL '*' select item."""

    def __repr__(self):
        return "'*'"

    def as_sql(self, compiler, connection):
        # '*' takes no parameters.
        return '*', []
class Random(Expression):
    """Expression emitting the backend's random-number SQL function."""

    def __init__(self):
        # The result is always a float, regardless of backend.
        super(Random, self).__init__(output_field=fields.FloatField())

    def __repr__(self):
        return "Random()"

    def as_sql(self, compiler, connection):
        return connection.ops.random_function_sql(), []
class Col(Expression):
    """A reference to a concrete database column (alias.column)."""

    contains_column_references = True

    def __init__(self, alias, target, output_field=None):
        # The target field doubles as the output type unless overridden.
        if output_field is None:
            output_field = target
        super(Col, self).__init__(output_field=output_field)
        self.alias = alias
        self.target = target

    def __repr__(self):
        return "{}({}, {})".format(
            self.__class__.__name__, self.alias, self.target)

    def as_sql(self, compiler, connection):
        qn = compiler.quote_name_unless_alias
        return "%s.%s" % (qn(self.alias), qn(self.target.column)), []

    def relabeled_clone(self, relabels):
        new_alias = relabels.get(self.alias, self.alias)
        return self.__class__(new_alias, self.target, self.output_field)

    def get_group_by_cols(self):
        return [self]

    def get_db_converters(self, connection):
        if self.target == self.output_field:
            return self.output_field.get_db_converters(connection)
        # Distinct target/output types both contribute converters.
        return (self.output_field.get_db_converters(connection) +
                self.target.get_db_converters(connection))
class Ref(Expression):
    """
    Reference to column alias of the query. For example, Ref('sum_cost') in
    qs.annotate(sum_cost=Sum('cost')) query.
    """

    def __init__(self, refs, source):
        super(Ref, self).__init__()
        self.refs = refs
        self.source = source

    def __repr__(self):
        return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source)

    def get_source_expressions(self):
        return [self.source]

    def set_source_expressions(self, exprs):
        (self.source,) = exprs

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        # The sub-expression `source` has already been resolved, as this is
        # just a reference to the name of `source`.
        return self

    def relabeled_clone(self, relabels):
        # An alias reference is unaffected by table relabeling.
        return self

    def as_sql(self, compiler, connection):
        return "%s" % connection.ops.quote_name(self.refs), []

    def get_group_by_cols(self):
        return [self]
class ExpressionWrapper(Expression):
    """
    An expression that can wrap another expression so that it can provide
    extra context to the inner expression, such as the output_field.
    """

    def __init__(self, expression, output_field):
        super(ExpressionWrapper, self).__init__(output_field=output_field)
        self.expression = expression

    def set_source_expressions(self, exprs):
        (self.expression,) = exprs

    def get_source_expressions(self):
        return [self.expression]

    def as_sql(self, compiler, connection):
        # SQL generation is delegated entirely to the wrapped expression.
        return self.expression.as_sql(compiler, connection)

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, self.expression)
class When(Expression):
    """A single WHEN <condition> THEN <result> clause of a CASE expression."""
    template = 'WHEN %(condition)s THEN %(result)s'

    def __init__(self, condition=None, then=None, **lookups):
        # Bare lookups (e.g. When(age__gt=18, then=...)) are converted into
        # a single Q object; mixing both forms is rejected below.
        if lookups and condition is None:
            condition, lookups = Q(**lookups), None
        if condition is None or not isinstance(condition, Q) or lookups:
            raise TypeError("__init__() takes either a Q object or lookups as keyword arguments")
        super(When, self).__init__(output_field=None)
        self.condition = condition
        self.result = self._parse_expressions(then)[0]

    def __str__(self):
        return "WHEN %r THEN %r" % (self.condition, self.result)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self)

    def get_source_expressions(self):
        return [self.condition, self.result]

    def set_source_expressions(self, exprs):
        self.condition, self.result = exprs

    def get_source_fields(self):
        # We're only interested in the fields of the result expressions.
        return [self.result._output_field_or_none]

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        c = self.copy()
        c.is_summary = summarize
        if hasattr(c.condition, 'resolve_expression'):
            # Conditions are resolved with for_save=False; they are never
            # written to the database.
            c.condition = c.condition.resolve_expression(query, allow_joins, reuse, summarize, False)
        c.result = c.result.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        return c

    def as_sql(self, compiler, connection, template=None):
        connection.ops.check_expression_support(self)
        template_params = {}
        sql_params = []
        condition_sql, condition_params = compiler.compile(self.condition)
        template_params['condition'] = condition_sql
        sql_params.extend(condition_params)
        result_sql, result_params = compiler.compile(self.result)
        template_params['result'] = result_sql
        sql_params.extend(result_params)
        template = template or self.template
        return template % template_params, sql_params

    def get_group_by_cols(self):
        # This is not a complete expression and cannot be used in GROUP BY.
        cols = []
        for source in self.get_source_expressions():
            cols.extend(source.get_group_by_cols())
        return cols
class Case(Expression):
    """
    An SQL searched CASE expression:

        CASE
            WHEN n > 0
                THEN 'positive'
            WHEN n < 0
                THEN 'negative'
            ELSE 'zero'
        END
    """
    template = 'CASE %(cases)s ELSE %(default)s END'
    case_joiner = ' '

    def __init__(self, *cases, **extra):
        if not all(isinstance(case, When) for case in cases):
            raise TypeError("Positional arguments must all be When objects.")
        default = extra.pop('default', None)
        output_field = extra.pop('output_field', None)
        super(Case, self).__init__(output_field)
        self.cases = list(cases)
        # A missing default becomes a wrapped None (rendered as NULL).
        self.default = self._parse_expressions(default)[0]

    def __str__(self):
        return "CASE %s, ELSE %r" % (', '.join(str(c) for c in self.cases), self.default)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self)

    def get_source_expressions(self):
        # The default is kept as the last source expression.
        return self.cases + [self.default]

    def set_source_expressions(self, exprs):
        self.cases = exprs[:-1]
        self.default = exprs[-1]

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        c = self.copy()
        c.is_summary = summarize
        for pos, case in enumerate(c.cases):
            c.cases[pos] = case.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        c.default = c.default.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        return c

    def copy(self):
        # Shallow-copy the case list so the clone can be resolved in place.
        c = super(Case, self).copy()
        c.cases = c.cases[:]
        return c

    def as_sql(self, compiler, connection, template=None, extra=None):
        connection.ops.check_expression_support(self)
        # With no WHEN clauses the CASE collapses to its default value.
        if not self.cases:
            return compiler.compile(self.default)
        template_params = dict(extra) if extra else {}
        case_parts = []
        sql_params = []
        for case in self.cases:
            case_sql, case_params = compiler.compile(case)
            case_parts.append(case_sql)
            sql_params.extend(case_params)
        template_params['cases'] = self.case_joiner.join(case_parts)
        default_sql, default_params = compiler.compile(self.default)
        template_params['default'] = default_sql
        sql_params.extend(default_params)
        template = template or self.template
        sql = template % template_params
        # Cast the whole CASE when the output type is known, so branches of
        # differing db types unify.
        if self._output_field_or_none is not None:
            sql = connection.ops.unification_cast_sql(self.output_field) % sql
        return sql, sql_params
class Date(Expression):
    """
    Add a date selection column.
    """
    def __init__(self, lookup, lookup_type):
        super(Date, self).__init__(output_field=fields.DateField())
        self.lookup = lookup
        # The resolved column; filled in by resolve_expression().
        self.col = None
        self.lookup_type = lookup_type

    def __repr__(self):
        return "{}({}, {})".format(self.__class__.__name__, self.lookup, self.lookup_type)

    def get_source_expressions(self):
        return [self.col]

    def set_source_expressions(self, exprs):
        self.col, = exprs

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        copy = self.copy()
        copy.col = query.resolve_ref(self.lookup, allow_joins, reuse, summarize)
        field = copy.col.output_field
        assert isinstance(field, fields.DateField), "%r isn't a DateField." % field.name
        # Under USE_TZ a DateTimeField would need timezone conversion, which
        # this expression does not perform.
        if settings.USE_TZ:
            assert not isinstance(field, fields.DateTimeField), (
                "%r is a DateTimeField, not a DateField." % field.name
            )
        return copy

    def as_sql(self, compiler, connection):
        sql, params = self.col.as_sql(compiler, connection)
        # A plain column reference should never carry parameters.
        assert not(params)
        return connection.ops.date_trunc_sql(self.lookup_type, sql), []

    def copy(self):
        copy = super(Date, self).copy()
        copy.lookup = self.lookup
        copy.lookup_type = self.lookup_type
        return copy

    def convert_value(self, value, expression, connection, context):
        # Some backends return datetimes for date truncation; narrow those.
        if isinstance(value, datetime.datetime):
            value = value.date()
        return value
class DateTime(Expression):
    """
    Add a datetime selection column.
    """
    def __init__(self, lookup, lookup_type, tzinfo):
        super(DateTime, self).__init__(output_field=fields.DateTimeField())
        self.lookup = lookup
        # The resolved column; filled in by resolve_expression().
        self.col = None
        self.lookup_type = lookup_type
        # Pre-compute the timezone name used for SQL-level truncation.
        if tzinfo is None:
            self.tzname = None
        else:
            self.tzname = timezone._get_timezone_name(tzinfo)
        self.tzinfo = tzinfo

    def __repr__(self):
        return "{}({}, {}, {})".format(
            self.__class__.__name__, self.lookup, self.lookup_type, self.tzinfo)

    def get_source_expressions(self):
        return [self.col]

    def set_source_expressions(self, exprs):
        self.col, = exprs

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        copy = self.copy()
        copy.col = query.resolve_ref(self.lookup, allow_joins, reuse, summarize)
        field = copy.col.output_field
        assert isinstance(field, fields.DateTimeField), (
            "%r isn't a DateTimeField." % field.name
        )
        return copy

    def as_sql(self, compiler, connection):
        sql, params = self.col.as_sql(compiler, connection)
        # A plain column reference should never carry parameters.
        assert not(params)
        return connection.ops.datetime_trunc_sql(self.lookup_type, sql, self.tzname)

    def copy(self):
        copy = super(DateTime, self).copy()
        copy.lookup = self.lookup
        copy.lookup_type = self.lookup_type
        copy.tzname = self.tzname
        return copy

    def convert_value(self, value, expression, connection, context):
        if settings.USE_TZ:
            if value is None:
                raise ValueError(
                    "Database returned an invalid value in QuerySet.datetimes(). "
                    "Are time zone definitions for your database and pytz installed?"
                )
            # Reattach the requested timezone to the naive database value.
            value = value.replace(tzinfo=None)
            value = timezone.make_aware(value, self.tzinfo)
        return value
class OrderBy(BaseExpression):
    """Wraps an expression with an ASC/DESC ordering direction."""
    template = '%(expression)s %(ordering)s'

    def __init__(self, expression, descending=False):
        self.descending = descending
        if not hasattr(expression, 'resolve_expression'):
            raise ValueError('expression must be an expression type')
        self.expression = expression

    def __repr__(self):
        return "{}({}, descending={})".format(
            self.__class__.__name__, self.expression, self.descending)

    def set_source_expressions(self, exprs):
        self.expression = exprs[0]

    def get_source_expressions(self):
        return [self.expression]

    def as_sql(self, compiler, connection):
        connection.ops.check_expression_support(self)
        expression_sql, params = compiler.compile(self.expression)
        placeholders = {
            'expression': expression_sql,
            'ordering': 'DESC' if self.descending else 'ASC',
        }
        return (self.template % placeholders).rstrip(), params

    def get_group_by_cols(self):
        cols = []
        for source in self.get_source_expressions():
            cols.extend(source.get_group_by_cols())
        return cols

    def reverse_ordering(self):
        # Flips direction in place and returns self for chaining.
        self.descending = not self.descending
        return self

    def asc(self):
        # NOTE: unlike BaseExpression.asc()/desc(), these mutate in place
        # and return None (stdlib-style in-place mutator convention).
        self.descending = False

    def desc(self):
        self.descending = True
| |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import argparse
import os
import sys
# External imports
# Bokeh imports
from bokeh.command.bootstrap import main
from bokeh._testing.util.filesystem import TmpDir, WorkingDir, with_directory_contents
from . import basic_svg_scatter_script, multi_svg_scatter_script
# Module under test
import bokeh.command.subcommands.svg as scsvg
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
is_python2 = sys.version_info[0] == 2
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
def test_create():
    """The SVG subcommand can be constructed and is a Subcommand."""
    # Removed a redundant local `import argparse`; argparse is already
    # imported at module level.
    from bokeh.command.subcommand import Subcommand
    obj = scsvg.SVG(parser=argparse.ArgumentParser())
    assert isinstance(obj, Subcommand)
def test_name():
    """The subcommand registers under the name "svg"."""
    assert scsvg.SVG.name == "svg"
def test_help():
    """The subcommand exposes the expected help string."""
    assert scsvg.SVG.help == "Create standalone SVG files for one or more applications"
def test_args():
    """The argparse specification of the svg subcommand is exactly as
    documented: files, --height, --width, -o/--output, and --args."""
    assert scsvg.SVG.args == (
        # One or more app directories or scripts (positional).
        ('files', dict(
            metavar='DIRECTORY-OR-SCRIPT',
            nargs='+',
            help="The app directories or scripts to generate SVG for",
            default=None,
        )),
        ('--height', dict(
            metavar='HEIGHT',
            type=int,
            help="The desired height of the exported layout obj only if it's a Plot instance",
            default=None,
        )),
        ('--width', dict(
            metavar='WIDTH',
            type=int,
            help="The desired width of the exported layout obj only if it's a Plot instance",
            default=None,
        )),
        # Repeatable output option; '-' means standard output.
        (('-o', '--output'), dict(
            metavar='FILENAME',
            action='append',
            type=str,
            help="Name of the output file or - for standard output."
        )),
        # Everything after --args is forwarded to the application handler.
        ('--args', dict(
            metavar='COMMAND-LINE-ARGS',
            nargs=argparse.REMAINDER,
            help="Any command line arguments remaining are passed on to the application handler",
        )),
    )
def test_no_script(capsys):
with (TmpDir(prefix="bokeh-svg-no-script")) as dirname:
with WorkingDir(dirname):
with pytest.raises(SystemExit):
main(["bokeh", "svg"])
out, err = capsys.readouterr()
if is_python2:
too_few = "too few arguments"
else:
too_few = "the following arguments are required: DIRECTORY-OR-SCRIPT"
assert err == """usage: bokeh svg [-h] [--height HEIGHT] [--width WIDTH] [-o FILENAME]
[--args ...]
DIRECTORY-OR-SCRIPT [DIRECTORY-OR-SCRIPT ...]
bokeh svg: error: %s
""" % (too_few)
assert out == ""
@pytest.mark.unit
@pytest.mark.selenium
def test_basic_script(capsys):
    """Exporting a single-plot script writes scatter.svg beside it."""
    def check(dirname):
        with WorkingDir(dirname):
            main(["bokeh", "svg", "scatter.py"])
        out, err = capsys.readouterr()
        assert err == ""
        assert out == ""
        assert set(os.listdir(dirname)) == set(["scatter.svg", "scatter.py"])

    with_directory_contents({'scatter.py': basic_svg_scatter_script}, check)
@pytest.mark.unit
@pytest.mark.selenium
def test_basic_script_with_output_after(capsys):
    """--output given after the script controls the exported filename."""
    def check(dirname):
        with WorkingDir(dirname):
            main(["bokeh", "svg", "scatter.py", "--output", "foo.svg"])
        out, err = capsys.readouterr()
        assert err == ""
        assert out == ""
        assert set(os.listdir(dirname)) == set(["foo.svg", "scatter.py"])

    with_directory_contents({'scatter.py': basic_svg_scatter_script}, check)
@pytest.mark.unit
@pytest.mark.selenium
def test_basic_script_with_output_before(capsys):
    """--output given before the script controls the exported filename."""
    def check(dirname):
        with WorkingDir(dirname):
            main(["bokeh", "svg", "--output", "foo.svg", "scatter.py"])
        out, err = capsys.readouterr()
        assert err == ""
        assert out == ""
        assert set(os.listdir(dirname)) == set(["foo.svg", "scatter.py"])

    with_directory_contents({'scatter.py': basic_svg_scatter_script}, check)
@pytest.mark.unit
@pytest.mark.selenium
def test_basic_script_with_output_stdout(capsys):
    """--output - streams the SVG to stdout instead of writing a file."""
    def check(dirname):
        with WorkingDir(dirname):
            main(["bokeh", "svg", "--output", "-", "scatter.py"])
        out, err = capsys.readouterr()
        assert len(err) == 0
        assert len(out) > 0
        assert out.startswith('<svg version=')
        # No .svg file is written to disk in this mode.
        assert set(os.listdir(dirname)) == set(["scatter.py"])

    with_directory_contents({'scatter.py': basic_svg_scatter_script}, check)
@pytest.mark.unit
@pytest.mark.selenium
def test_multiple_svg_scripts(capsys):
    """Each of several scripts gets its own .svg output file."""
    def check(dirname):
        with WorkingDir(dirname):
            main(["bokeh", "svg", "scatter1.py", "scatter2.py", "scatter3.py"])
        out, err = capsys.readouterr()
        assert err == ""
        assert out == ""
        expected = set(["scatter1.svg", "scatter2.svg", "scatter3.svg",
                        "scatter1.py", "scatter2.py", "scatter3.py"])
        assert set(os.listdir(dirname)) == expected

    with_directory_contents({'scatter1.py': basic_svg_scatter_script,
                             'scatter2.py': basic_svg_scatter_script,
                             'scatter3.py': basic_svg_scatter_script},
                            check)
@pytest.mark.unit
@pytest.mark.selenium
def test_basic_script_with_multiple_svg_plots(capsys):
    """A script with several plots emits scatter.svg, scatter_1.svg, ..."""
    def check(dirname):
        with WorkingDir(dirname):
            main(["bokeh", "svg", "scatter.py"])
        out, err = capsys.readouterr()
        assert err == ""
        assert out == ""
        assert set(os.listdir(dirname)) == set(["scatter.svg", "scatter_1.svg", "scatter.py"])

    with_directory_contents({'scatter.py': multi_svg_scatter_script},
                            check)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| |
#!/usr/bin/env python
#
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Updates the Chrome reference builds.
Usage:
$ cd /tmp
$ /path/to/update_reference_build.py -r <revision>
$ cd reference_builds/reference_builds
$ gcl change
$ gcl upload <change>
$ gcl commit <change>
"""
import errno
import logging
import optparse
import os
import shutil
import subprocess
import sys
import time
import urllib
import urllib2
import zipfile
class BuildUpdater(object):
_PLATFORM_FILES_MAP = {
'Win': [
'chrome-win32.zip',
'chrome-win32-syms.zip',
'chrome-win32.test/_pyautolib.pyd',
'chrome-win32.test/pyautolib.py',
],
'Mac': [
'chrome-mac.zip',
'chrome-mac.test/_pyautolib.so',
'chrome-mac.test/pyautolib.py',
],
'Linux': [
'chrome-linux.zip',
],
'Linux_x64': [
'chrome-linux.zip',
],
}
_PLATFORM_DEST_MAP = {
'Linux': 'chrome_linux',
'Linux_x64': 'chrome_linux64',
'Win': 'chrome_win',
'Mac': 'chrome_mac',
}
def __init__(self, options):
self._platforms = options.platforms.split(',')
self._revision = int(options.revision)
@staticmethod
def _GetCmdStatusAndOutput(args, cwd=None, shell=False):
"""Executes a subprocess and returns its exit code and output.
Args:
args: A string or a sequence of program arguments.
cwd: If not None, the subprocess's current directory will be changed to
|cwd| before it's executed.
shell: Whether to execute args as a shell command.
Returns:
The tuple (exit code, output).
"""
logging.info(str(args) + ' ' + (cwd or ''))
p = subprocess.Popen(args=args, cwd=cwd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=shell)
stdout, stderr = p.communicate()
exit_code = p.returncode
if stderr:
logging.critical(stderr)
logging.info(stdout)
return (exit_code, stdout)
def _GetBuildUrl(self, platform, revision, filename):
URL_FMT = ('http://commondatastorage.googleapis.com/'
'chromium-browser-snapshots/%s/%s/%s')
return URL_FMT % (urllib.quote_plus(platform), revision, filename)
def _FindBuildRevision(self, platform, revision, filename):
MAX_REVISIONS_PER_BUILD = 100
for revision_guess in xrange(revision, revision + MAX_REVISIONS_PER_BUILD):
r = urllib2.Request(self._GetBuildUrl(platform, revision_guess, filename))
r.get_method = lambda: 'HEAD'
try:
response = urllib2.urlopen(r)
return revision_guess
except urllib2.HTTPError, err:
if err.code == 404:
time.sleep(.1)
continue
return None
def _DownloadBuilds(self):
for platform in self._platforms:
for f in BuildUpdater._PLATFORM_FILES_MAP[platform]:
output = os.path.join('dl', platform,
'%s_%s_%s' % (platform, self._revision, f))
if os.path.exists(output):
logging.info('%s alread exists, skipping download' % output)
continue
build_revision = self._FindBuildRevision(platform, self._revision, f)
if not build_revision:
logging.critical('Failed to find %s build for r%s\n' % (
platform, self._revision))
sys.exit(1)
dirname = os.path.dirname(output)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
url = self._GetBuildUrl(platform, build_revision, f)
logging.info('Downloading %s, saving to %s' % (url, output))
r = urllib2.urlopen(url)
with file(output, 'wb') as f:
f.write(r.read())
def _FetchSvnRepos(self):
if not os.path.exists('reference_builds'):
os.makedirs('reference_builds')
BuildUpdater._GetCmdStatusAndOutput(
['gclient', 'config',
'svn://svn.chromium.org/chrome/trunk/deps/reference_builds'],
'reference_builds')
BuildUpdater._GetCmdStatusAndOutput(
['gclient', 'sync'], 'reference_builds')
def _UnzipFile(self, dl_file, dest_dir):
if not zipfile.is_zipfile(dl_file):
return False
logging.info('Opening %s' % dl_file)
with zipfile.ZipFile(dl_file, 'r') as z:
for content in z.namelist():
dest = os.path.join(dest_dir, content[content.find('/')+1:])
if not os.path.basename(dest):
if not os.path.isdir(dest):
os.makedirs(dest)
continue
with z.open(content) as unzipped_content:
logging.info('Extracting %s to %s (%s)' % (content, dest, dl_file))
with file(dest, 'wb') as dest_file:
dest_file.write(unzipped_content.read())
permissions = z.getinfo(content).external_attr >> 16
if permissions:
os.chmod(dest, permissions)
return True
def _ClearDir(self, dir):
"""Clears all files in |dir| except for hidden files and folders."""
for root, dirs, files in os.walk(dir):
# Skip hidden files and folders (like .svn and .git).
files = [f for f in files if f[0] != '.']
dirs[:] = [d for d in dirs if d[0] != '.']
for f in files:
os.remove(os.path.join(root, f))
def _ExtractBuilds(self):
    """Unpacks each downloaded archive into its reference_builds directory.

    Zip archives are extracted in place; non-zip downloads are copied
    verbatim into the destination directory.
    """
    for platform in self._platforms:
        # BUG FIX: os.path has no unlink() attribute; os.path.unlink would
        # raise AttributeError whenever 'tmp_unzip' existed. Use os.unlink.
        if os.path.exists('tmp_unzip'):
            os.unlink('tmp_unzip')
        dest_dir = os.path.join('reference_builds', 'reference_builds',
                                BuildUpdater._PLATFORM_DEST_MAP[platform])
        # Remove the previous build's files; hidden VCS metadata survives.
        self._ClearDir(dest_dir)
        for root, _, dl_files in os.walk(os.path.join('dl', platform)):
            for dl_file in dl_files:
                dl_file = os.path.join(root, dl_file)
                if not self._UnzipFile(dl_file, dest_dir):
                    logging.info('Copying %s to %s' % (dl_file, dest_dir))
                    shutil.copy(dl_file, dest_dir)
def _SvnAddAndRemove(self):
    """Reconciles the SVN working copy with the freshly extracted files.

    Parses 'svn stat' output line by line: unversioned entries (?) are
    added, missing entries (!) are deleted, and executable files get the
    svn:executable property set.
    """
    svn_dir = os.path.join('reference_builds', 'reference_builds')
    stat = BuildUpdater._GetCmdStatusAndOutput(['svn', 'stat'], svn_dir)[1]
    for line in stat.splitlines():
        # Each status line is "<action> <filename>".
        action, filename = line.split(None, 1)
        if action == '?':
            BuildUpdater._GetCmdStatusAndOutput(
                ['svn', 'add', filename], svn_dir)
        elif action == '!':
            BuildUpdater._GetCmdStatusAndOutput(
                ['svn', 'delete', filename], svn_dir)
        filepath = os.path.join(svn_dir, filename)
        # Preserve the executable bit for binaries that were just added.
        if not os.path.isdir(filepath) and os.access(filepath, os.X_OK):
            BuildUpdater._GetCmdStatusAndOutput(
                ['svn', 'propset', 'svn:executable', 'true', filename], svn_dir)
def DownloadAndUpdateBuilds(self):
    """Runs the full pipeline: download, checkout, extract, svn add/remove."""
    self._DownloadBuilds()
    self._FetchSvnRepos()
    self._ExtractBuilds()
    self._SvnAddAndRemove()
def ParseOptions(argv):
    """Parses command-line flags; exits the process when -r is missing."""
    parser = optparse.OptionParser()
    parser.set_usage('usage: %prog <options>')
    parser.add_option('-r', dest='revision',
                      help='Revision to pickup')
    parser.add_option('-p', dest='platforms',
                      default='Win,Mac,Linux,Linux_x64',
                      help='Comma separated list of platforms to download '
                           '(as defined by the chromium builders).')
    options, _ = parser.parse_args(argv)
    # A target revision is mandatory; bail out loudly without one.
    if not options.revision:
        logging.critical('Must specify -r\n')
        sys.exit(1)
    return options
def main(argv):
    """Entry point: downloads and commits-prepares reference builds."""
    logging.getLogger().setLevel(logging.DEBUG)
    options = ParseOptions(argv)
    b = BuildUpdater(options)
    b.DownloadAndUpdateBuilds()
    logging.info('Successfully updated reference builds. Move to '
                 'reference_builds/reference_builds and make a change with gcl.')
# Script entry point. main() returns None, so the exit status is 0.
if __name__ == '__main__':
    sys.exit(main(sys.argv))
| |
from __future__ import unicode_literals
import re
import string
from datetime import date
from datetime import datetime
from datetime import time
from ._compat import PY2
from ._compat import PY38
from ._compat import decode
from ._compat import long
from ._compat import unicode
from ._utils import escape_string
if PY2:
from pipenv.vendor.backports.enum import Enum
from pipenv.vendor.backports.functools_lru_cache import lru_cache
else:
from enum import Enum
from functools import lru_cache
def item(value, _parent=None, _sort_keys=False):
    """Converts a plain Python value into the matching TOML Item.

    Existing Items pass through unchanged. Dicts become Tables, lists
    become Arrays (or arrays-of-tables when the first element is a dict),
    and scalars map onto their Item subclasses. Raises ValueError for
    unsupported types.
    """
    from .container import Container

    if isinstance(value, Item):
        return value

    # NOTE: bool must be tested before int because bool subclasses int.
    if isinstance(value, bool):
        return Bool(value, Trivia())
    elif isinstance(value, int):
        return Integer(value, Trivia(), str(value))
    elif isinstance(value, float):
        return Float(value, Trivia(), str(value))
    elif isinstance(value, dict):
        val = Table(Container(), Trivia(), False)
        # Sort so sub-tables come after plain values; key names are only
        # used for ordering when _sort_keys is set.
        for k, v in sorted(
            value.items(),
            key=lambda i: (isinstance(i[1], dict), i[0] if _sort_keys else 1),
        ):
            val[k] = item(v, _parent=val, _sort_keys=_sort_keys)
        return val
    elif isinstance(value, list):
        # A list of dicts renders as an array of tables.
        if value and isinstance(value[0], dict):
            a = AoT([])
        else:
            a = Array([], Trivia())
        for v in value:
            if isinstance(v, dict):
                table = Table(Container(), Trivia(), True)
                for k, _v in sorted(
                    v.items(),
                    key=lambda i: (isinstance(i[1], dict), i[0] if _sort_keys else 1),
                ):
                    i = item(_v, _sort_keys=_sort_keys)
                    # Inline-table entries drop trailing trivia.
                    if isinstance(table, InlineTable):
                        i.trivia.trail = ""
                    # i is already an Item, so this inner item() call is a
                    # pass-through; kept as-is to preserve behavior.
                    table[k] = item(i, _sort_keys=_sort_keys)
                v = table
            a.append(v)
        return a
    elif isinstance(value, (str, unicode)):
        escaped = escape_string(value)
        return String(StringType.SLB, decode(value), escaped, Trivia())
    elif isinstance(value, datetime):
        # Normalize a UTC offset to the TOML 'Z' suffix in the raw text.
        return DateTime(
            value.year,
            value.month,
            value.day,
            value.hour,
            value.minute,
            value.second,
            value.microsecond,
            value.tzinfo,
            Trivia(),
            value.isoformat().replace("+00:00", "Z"),
        )
    elif isinstance(value, date):
        return Date(value.year, value.month, value.day, Trivia(), value.isoformat())
    elif isinstance(value, time):
        return Time(
            value.hour,
            value.minute,
            value.second,
            value.microsecond,
            value.tzinfo,
            Trivia(),
            value.isoformat(),
        )
    raise ValueError("Invalid type {}".format(type(value)))
class StringType(Enum):
    """The four TOML string delimiter kinds."""

    # Single Line Basic
    SLB = '"'
    # Multi Line Basic
    MLB = '"""'
    # Single Line Literal
    SLL = "'"
    # Multi Line Literal
    MLL = "'''"

    @property
    @lru_cache(maxsize=None)
    def unit(self):  # type: () -> str
        # One delimiter character (first char of the full delimiter).
        return self.value[0]

    @lru_cache(maxsize=None)
    def is_basic(self):  # type: () -> bool
        return self is StringType.SLB or self is StringType.MLB

    @lru_cache(maxsize=None)
    def is_literal(self):  # type: () -> bool
        return self is StringType.SLL or self is StringType.MLL

    @lru_cache(maxsize=None)
    def is_singleline(self):  # type: () -> bool
        # Single-line delimiters are exactly one character long.
        return self.value in ('"', "'")

    @lru_cache(maxsize=None)
    def is_multiline(self):  # type: () -> bool
        return self.value in ('"""', "'''")

    @lru_cache(maxsize=None)
    def toggle(self):  # type: () -> StringType
        # Flip between single-line and multi-line within the same family.
        pairs = {
            StringType.SLB: StringType.MLB,
            StringType.MLB: StringType.SLB,
            StringType.SLL: StringType.MLL,
            StringType.MLL: StringType.SLL,
        }
        return pairs[self]
class BoolType(Enum):
    """The textual representations of a TOML boolean."""

    TRUE = "true"
    FALSE = "false"

    @lru_cache(maxsize=None)
    def __bool__(self):
        # Map the member to the Python bool it denotes.
        return {BoolType.TRUE: True, BoolType.FALSE: False}[self]

    if PY2:
        __nonzero__ = __bool__  # for PY2

    def __iter__(self):
        # Iterates the characters of the literal text ("true"/"false").
        return iter(self.value)

    def __len__(self):
        # Length of the literal text.
        return len(self.value)
class Trivia:
    """
    Trivia information (aka metadata).
    """

    def __init__(
        self, indent=None, comment_ws=None, comment=None, trail=None
    ):  # type: (str, str, str, str) -> None
        # Whitespace before a value.
        self.indent = indent or ""
        # Whitespace after a value, but before a comment.
        self.comment_ws = comment_ws or ""
        # Comment, starting with # character, or empty string if no comment.
        self.comment = comment or ""
        # Trailing newline. Only a missing (None) trail defaults to "\n";
        # an explicit empty string is kept as-is.
        self.trail = "\n" if trail is None else trail
class KeyType(Enum):
    """
    The type of a Key.

    Keys can be bare (unquoted), or quoted using basic ("), or literal (')
    quotes following the same escaping rules as single-line StringType.
    """

    # Unquoted key.
    Bare = ""
    # Double-quoted key.
    Basic = '"'
    # Single-quoted key.
    Literal = "'"
class Key:
    """
    A key value.
    """

    def __init__(
        self, k, t=None, sep=None, dotted=False, original=None
    ):  # type: (str, Optional[KeyType], Optional[str], bool, Optional[str]) -> None
        if t is None:
            # Keys made only of ASCII letters, digits, '-' and '_' may stay
            # bare; anything else needs basic quoting.
            bare_chars = string.ascii_letters + string.digits + "-" + "_"
            t = KeyType.Bare if all(c in bare_chars for c in k) else KeyType.Basic

        self.t = t
        self.sep = " = " if sep is None else sep
        self.key = k
        self._original = k if original is None else original
        self._dotted = dotted

    @property
    def delimiter(self):  # type: () -> str
        # The quote character for this key type ('' for bare keys).
        return self.t.value

    def is_dotted(self):  # type: () -> bool
        return self._dotted

    def is_bare(self):  # type: () -> bool
        return self.t == KeyType.Bare

    def as_string(self):  # type: () -> str
        return "%s%s%s" % (self.delimiter, self._original, self.delimiter)

    def __hash__(self):  # type: () -> int
        return hash(self.key)

    def __eq__(self, other):  # type: (Key) -> bool
        # Compare by key text, whether other is a Key or a plain string.
        other_key = other.key if isinstance(other, Key) else other
        return self.key == other_key

    def __str__(self):  # type: () -> str
        return self.as_string()

    def __repr__(self):  # type: () -> str
        return "<Key {}>".format(self.as_string())
class Item(object):
    """
    An item within a TOML document.
    """

    def __init__(self, trivia):  # type: (Trivia) -> None
        self._trivia = trivia

    @property
    def trivia(self):  # type: () -> Trivia
        # Surrounding whitespace/comment metadata.
        return self._trivia

    @property
    def discriminant(self):  # type: () -> int
        # Unique integer tag identifying the concrete Item subclass.
        raise NotImplementedError()

    def as_string(self):  # type: () -> str
        # TOML text representation of this item.
        raise NotImplementedError()

    # Helpers

    def comment(self, comment):  # type: (str) -> Item
        # Attach a comment to this item; a leading "# " is added if missing.
        # Returns self for chaining.
        if not comment.strip().startswith("#"):
            comment = "# " + comment
        self._trivia.comment_ws = " "
        self._trivia.comment = comment
        return self

    def indent(self, indent):  # type: (int) -> Item
        # Set this item's indentation to *indent* spaces, preserving a
        # leading newline if one is present. Returns self for chaining.
        if self._trivia.indent.startswith("\n"):
            self._trivia.indent = "\n" + " " * indent
        else:
            self._trivia.indent = " " * indent
        return self

    def is_boolean(self):  # type: () -> bool
        return isinstance(self, Bool)

    def is_table(self):  # type: () -> bool
        return isinstance(self, Table)

    def is_inline_table(self):  # type: () -> bool
        return isinstance(self, InlineTable)

    def is_aot(self):  # type: () -> bool
        return isinstance(self, AoT)

    def _getstate(self, protocol=3):
        # Constructor arguments used for pickling via __reduce_ex__.
        return (self._trivia,)

    def __reduce__(self):
        return self.__reduce_ex__(2)

    def __reduce_ex__(self, protocol):
        return self.__class__, self._getstate(protocol)
class Whitespace(Item):
    """
    A whitespace literal.
    """

    def __init__(self, s, fixed=False):  # type: (str, bool) -> None
        # Deliberately does not call Item.__init__: whitespace has no trivia.
        self._s = s
        self._fixed = fixed

    @property
    def s(self):  # type: () -> str
        return self._s

    @property
    def value(self):  # type: () -> str
        # The raw whitespace characters.
        return self._s

    @property
    def trivia(self):  # type: () -> Trivia
        # Whitespace carries no trivia by design; accessing it is an error.
        raise RuntimeError("Called trivia on a Whitespace variant.")

    @property
    def discriminant(self):  # type: () -> int
        return 0

    def is_fixed(self):  # type: () -> bool
        # Presumably marks whitespace that must be preserved verbatim —
        # TODO(review): confirm against the parser's usage.
        return self._fixed

    def as_string(self):  # type: () -> str
        return self._s

    def __repr__(self):  # type: () -> str
        return "<{} {}>".format(self.__class__.__name__, repr(self._s))

    def _getstate(self, protocol=3):
        return self._s, self._fixed
class Comment(Item):
    """
    A comment literal.
    """

    @property
    def discriminant(self):  # type: () -> int
        return 1

    def as_string(self):  # type: () -> str
        # Full rendered line: indent + comment text + trailing newline.
        return "{}{}{}".format(
            self._trivia.indent, decode(self._trivia.comment), self._trivia.trail
        )

    def __str__(self):  # type: () -> str
        # Like as_string() but without the trailing newline.
        return "{}{}".format(self._trivia.indent, decode(self._trivia.comment))
class Integer(long, Item):
    """
    An integer literal.

    Subclasses the native integer type, so instances behave as plain ints
    while also carrying their original raw text and trivia.
    """

    def __new__(cls, value, trivia, raw):  # type: (int, Trivia, str) -> Integer
        return super(Integer, cls).__new__(cls, value)

    def __init__(self, _, trivia, raw):  # type: (int, Trivia, str) -> None
        super(Integer, self).__init__(trivia)

        self._raw = raw
        # True when the raw text carried an explicit leading sign.
        self._sign = False

        if re.match(r"^[+\-]\d+$", raw):
            self._sign = True

    @property
    def discriminant(self):  # type: () -> int
        return 2

    @property
    def value(self):  # type: () -> int
        return self

    def as_string(self):  # type: () -> str
        return self._raw

    def __add__(self, other):
        result = super(Integer, self).__add__(other)
        return self._new(result)

    def __radd__(self, other):
        result = super(Integer, self).__radd__(other)
        # Only wrap when the other operand is an Integer too; otherwise
        # return the plain result.
        if isinstance(other, Integer):
            return self._new(result)
        return result

    def __sub__(self, other):
        result = super(Integer, self).__sub__(other)
        return self._new(result)

    def __rsub__(self, other):
        result = super(Integer, self).__rsub__(other)
        if isinstance(other, Integer):
            return self._new(result)
        return result

    def _new(self, result):
        """Wraps an arithmetic result in a new Integer with fresh raw text."""
        raw = str(result)
        # BUG FIX: only prepend '+' for non-negative results. str() of a
        # negative number already includes '-'; unconditionally prefixing a
        # sign produced malformed raw text such as "--5".
        if self._sign and result >= 0:
            raw = "+" + raw
        return Integer(result, self._trivia, raw)

    def _getstate(self, protocol=3):
        # (value, trivia, raw) — mirrors the constructor signature.
        return int(self), self._trivia, self._raw
class Float(float, Item):
    """
    A float literal.

    Subclasses the native float type, so instances behave as plain floats
    while also carrying their original raw text and trivia.
    """

    def __new__(cls, value, trivia, raw):  # type: (float, Trivia, str) -> Float
        return super(Float, cls).__new__(cls, value)

    def __init__(self, _, trivia, raw):  # type: (float, Trivia, str) -> None
        super(Float, self).__init__(trivia)

        self._raw = raw
        # True when the raw text carried an explicit leading sign.
        self._sign = False

        if re.match(r"^[+\-].+$", raw):
            self._sign = True

    @property
    def discriminant(self):  # type: () -> int
        return 3

    @property
    def value(self):  # type: () -> float
        return self

    def as_string(self):  # type: () -> str
        return self._raw

    def __add__(self, other):
        result = super(Float, self).__add__(other)
        return self._new(result)

    def __radd__(self, other):
        result = super(Float, self).__radd__(other)
        # Only wrap when the other operand is a Float too.
        if isinstance(other, Float):
            return self._new(result)
        return result

    def __sub__(self, other):
        result = super(Float, self).__sub__(other)
        return self._new(result)

    def __rsub__(self, other):
        result = super(Float, self).__rsub__(other)
        if isinstance(other, Float):
            return self._new(result)
        return result

    def _new(self, result):
        """Wraps an arithmetic result in a new Float with fresh raw text."""
        raw = str(result)
        # BUG FIX: only prepend '+' for non-negative results. str() of a
        # negative number already includes '-'; unconditionally prefixing a
        # sign produced malformed raw text such as "--5.0".
        if self._sign and result >= 0:
            raw = "+" + raw
        return Float(result, self._trivia, raw)

    def _getstate(self, protocol=3):
        # (value, trivia, raw) — mirrors the constructor signature.
        return float(self), self._trivia, self._raw
class Bool(Item):
    """
    A boolean literal.
    """

    def __init__(self, t, trivia):  # type: (int, Trivia) -> None
        super(Bool, self).__init__(trivia)
        # Coerce whatever truthy/falsy value was given into a real bool.
        self._value = bool(t)

    @property
    def discriminant(self):  # type: () -> int
        return 4

    @property
    def value(self):  # type: () -> bool
        return self._value

    def as_string(self):  # type: () -> str
        # TOML booleans are lowercase ("true"/"false").
        return str(self._value).lower()

    def _getstate(self, protocol=3):
        return self._value, self._trivia

    def __bool__(self):
        return self._value

    __nonzero__ = __bool__  # Python 2 truthiness hook

    def __eq__(self, other):
        # Only comparable against plain bools.
        if not isinstance(other, bool):
            return NotImplemented
        return self._value is other

    def __hash__(self):
        return hash(self._value)

    def __repr__(self):
        return repr(self._value)
class DateTime(Item, datetime):
    """
    A datetime literal.

    Subclasses datetime, so instances behave as plain datetimes while also
    carrying their original raw text and trivia.
    """

    def __new__(
        cls,
        year,
        month,
        day,
        hour,
        minute,
        second,
        microsecond,
        tzinfo,
        trivia,
        raw,
        **kwargs
    ):  # type: (int, int, int, int, int, int, int, Optional[datetime.tzinfo], Trivia, str, Any) -> datetime
        return datetime.__new__(
            cls,
            year,
            month,
            day,
            hour,
            minute,
            second,
            microsecond,
            tzinfo=tzinfo,
            **kwargs
        )

    def __init__(
        self, year, month, day, hour, minute, second, microsecond, tzinfo, trivia, raw
    ):  # type: (int, int, int, int, int, int, int, Optional[datetime.tzinfo], Trivia, str) -> None
        super(DateTime, self).__init__(trivia)

        self._raw = raw

    @property
    def discriminant(self):  # type: () -> int
        return 5

    @property
    def value(self):  # type: () -> datetime
        return self

    def as_string(self):  # type: () -> str
        return self._raw

    def __add__(self, other):
        # On Python 3.8+ arithmetic goes through a plain datetime copy —
        # presumably to sidestep the 3.8 change where datetime arithmetic
        # started returning subclass instances; TODO(review) confirm.
        if PY38:
            result = datetime(
                self.year,
                self.month,
                self.day,
                self.hour,
                self.minute,
                self.second,
                self.microsecond,
                self.tzinfo,
            ).__add__(other)
        else:
            result = super(DateTime, self).__add__(other)

        return self._new(result)

    def __sub__(self, other):
        if PY38:
            result = datetime(
                self.year,
                self.month,
                self.day,
                self.hour,
                self.minute,
                self.second,
                self.microsecond,
                self.tzinfo,
            ).__sub__(other)
        else:
            result = super(DateTime, self).__sub__(other)

        # Subtracting a timedelta yields a datetime (wrapped); subtracting
        # another datetime yields a timedelta (returned as-is).
        if isinstance(result, datetime):
            result = self._new(result)

        return result

    def _new(self, result):
        # Wrap an arithmetic result in a new DateTime with regenerated raw.
        raw = result.isoformat()

        return DateTime(
            result.year,
            result.month,
            result.day,
            result.hour,
            result.minute,
            result.second,
            result.microsecond,
            result.tzinfo,
            self._trivia,
            raw,
        )

    def _getstate(self, protocol=3):
        # Constructor arguments for pickling.
        return (
            self.year,
            self.month,
            self.day,
            self.hour,
            self.minute,
            self.second,
            self.microsecond,
            self.tzinfo,
            self._trivia,
            self._raw,
        )
class Date(Item, date):
    """
    A date literal.

    Subclasses date, so instances behave as plain dates while also carrying
    their original raw text and trivia.
    """

    def __new__(cls, year, month, day, *_):  # type: (int, int, int, Any) -> date
        return date.__new__(cls, year, month, day)

    def __init__(
        self, year, month, day, trivia, raw
    ):  # type: (int, int, int, Trivia, str) -> None
        super(Date, self).__init__(trivia)

        self._raw = raw

    @property
    def discriminant(self):  # type: () -> int
        return 6

    @property
    def value(self):  # type: () -> date
        return self

    def as_string(self):  # type: () -> str
        return self._raw

    def __add__(self, other):
        # See DateTime.__add__ for the PY38 special case rationale.
        if PY38:
            result = date(self.year, self.month, self.day).__add__(other)
        else:
            result = super(Date, self).__add__(other)

        return self._new(result)

    def __sub__(self, other):
        if PY38:
            result = date(self.year, self.month, self.day).__sub__(other)
        else:
            result = super(Date, self).__sub__(other)

        # date - timedelta yields a date (wrapped); date - date yields a
        # timedelta (returned as-is).
        if isinstance(result, date):
            result = self._new(result)

        return result

    def _new(self, result):
        # Wrap an arithmetic result in a new Date with regenerated raw text.
        raw = result.isoformat()

        return Date(result.year, result.month, result.day, self._trivia, raw)

    def _getstate(self, protocol=3):
        # Constructor arguments for pickling.
        return (self.year, self.month, self.day, self._trivia, self._raw)
class Time(Item, time):
    """
    A time literal.

    Subclasses time, so instances behave as plain times while also carrying
    their original raw text and trivia.
    """

    def __new__(
        cls, hour, minute, second, microsecond, tzinfo, *_
    ):  # type: (int, int, int, int, Optional[datetime.tzinfo], Any) -> time
        return time.__new__(cls, hour, minute, second, microsecond, tzinfo)

    def __init__(
        self, hour, minute, second, microsecond, tzinfo, trivia, raw
    ):  # type: (int, int, int, int, Optional[datetime.tzinfo], Trivia, str) -> None
        super(Time, self).__init__(trivia)

        self._raw = raw

    @property
    def discriminant(self):  # type: () -> int
        return 7

    @property
    def value(self):  # type: () -> time
        return self

    def as_string(self):  # type: () -> str
        return self._raw

    def _getstate(self, protocol=3):
        # Constructor arguments for pickling.
        return (
            self.hour,
            self.minute,
            self.second,
            self.microsecond,
            self.tzinfo,
            self._trivia,
            self._raw,
        )
class Array(Item, list):
    """
    An array literal
    """

    def __init__(
        self, value, trivia, multiline=False
    ):  # type: (list, Trivia, bool) -> None
        super(Array, self).__init__(trivia)
        # The list base holds only the real values; whitespace/comment
        # pseudo-items stay in self._value for rendering.
        list.__init__(
            self, [v.value for v in value if not isinstance(v, (Whitespace, Comment))]
        )

        self._value = value
        self._multiline = multiline

    @property
    def discriminant(self):  # type: () -> int
        return 8

    @property
    def value(self):  # type: () -> list
        return self

    def multiline(self, multiline):  # type: (bool) -> self
        # Toggle multi-line rendering; returns self for chaining.
        self._multiline = multiline

        return self

    def as_string(self):  # type: () -> str
        if not self._multiline:
            return "[{}]".format("".join(v.as_string() for v in self._value))

        # Multi-line form: one element per line, indented 4 spaces past the
        # array's own indent, with a trailing comma on the last element.
        s = "[\n" + self.trivia.indent + " " * 4
        s += (",\n" + self.trivia.indent + " " * 4).join(
            v.as_string() for v in self._value if not isinstance(v, Whitespace)
        )
        s += ",\n"
        s += "]"

        return s

    def append(self, _item):  # type: (Any) -> None
        # Separate elements with ", " whitespace pseudo-items.
        if self._value:
            self._value.append(Whitespace(", "))

        it = item(_item)
        super(Array, self).append(it.value)

        self._value.append(it)

    if not PY2:

        def clear(self):
            # Keep the rendered representation in sync with the list base.
            super(Array, self).clear()

            self._value.clear()

    def __iadd__(self, other):  # type: (list) -> Array
        if not isinstance(other, list):
            return NotImplemented

        for v in other:
            self.append(v)

        return self

    def __delitem__(self, key):
        # Delete from the list base first, then locate and drop the
        # corresponding rendered item, skipping whitespace/comments.
        super(Array, self).__delitem__(key)

        # j counts real values; for negative keys we walk from the end.
        j = 0 if key >= 0 else -1
        for i, v in enumerate(self._value if key >= 0 else reversed(self._value)):
            if key < 0:
                i = -i - 1

            if isinstance(v, (Comment, Whitespace)):
                continue

            if j == key:
                del self._value[i]

                # Presumably re-normalizes the index after deletion and drops
                # the separator whitespace that followed the removed value —
                # TODO(review): confirm edge cases for negative indices.
                if i < 0 and abs(i) > len(self._value):
                    i += 1

                if i < len(self._value) - 1 and isinstance(self._value[i], Whitespace):
                    del self._value[i]

                break

            j += 1 if key >= 0 else -1

    def __str__(self):
        return str(
            [v.value for v in self._value if not isinstance(v, (Whitespace, Comment))]
        )

    def __repr__(self):
        return str(self)

    def _getstate(self, protocol=3):
        return self._value, self._trivia
class Table(Item, dict):
    """
    A table literal.
    """

    def __init__(
        self,
        value,
        trivia,
        is_aot_element,
        is_super_table=False,
        name=None,
        display_name=None,
    ):  # type: (tomlkit.container.Container, Trivia, bool, bool, Optional[str], Optional[str]) -> None
        super(Table, self).__init__(trivia)

        self.name = name
        self.display_name = display_name
        self._value = value
        self._is_aot_element = is_aot_element
        self._is_super_table = is_super_table

        # Mirror the container's keyed entries into the dict base class so
        # plain dict operations observe the same data.
        for k, v in self._value.body:
            if k is not None:
                super(Table, self).__setitem__(k.key, v)

    @property
    def value(self):  # type: () -> tomlkit.container.Container
        return self._value

    @property
    def discriminant(self):  # type: () -> int
        return 9

    def add(self, key, item=None):  # type: (Union[Key, Item, str], Any) -> Item
        # With a single argument, only keyless items (comments/whitespace)
        # may be added; everything else requires an explicit key.
        if item is None:
            if not isinstance(key, (Comment, Whitespace)):
                raise ValueError(
                    "Non comment/whitespace items must have an associated key"
                )

            key, item = None, key

        return self.append(key, item)

    def append(self, key, _item):  # type: (Union[Key, str], Any) -> Table
        """
        Appends a (key, item) to the table.
        """
        if not isinstance(_item, Item):
            _item = item(_item)

        self._value.append(key, _item)

        if isinstance(key, Key):
            key = key.key

        if key is not None:
            super(Table, self).__setitem__(key, _item)

        # Propagate this table's own indentation onto the appended item.
        m = re.match("(?s)^[^ ]*([ ]+).*$", self._trivia.indent)
        if not m:
            return self

        indent = m.group(1)

        if not isinstance(_item, Whitespace):
            m = re.match("(?s)^([^ ]*)(.*)$", _item.trivia.indent)
            if not m:
                _item.trivia.indent = indent
            else:
                _item.trivia.indent = m.group(1) + indent + m.group(2)

        return self

    def raw_append(self, key, _item):  # type: (Union[Key, str], Any) -> Table
        # Like append() but without the indentation propagation step.
        if not isinstance(_item, Item):
            _item = item(_item)

        self._value.append(key, _item)

        if isinstance(key, Key):
            key = key.key

        if key is not None:
            super(Table, self).__setitem__(key, _item)

        return self

    def remove(self, key):  # type: (Union[Key, str]) -> Table
        # Remove from the container and the dict mirror; returns self.
        self._value.remove(key)

        if isinstance(key, Key):
            key = key.key

        if key is not None:
            super(Table, self).__delitem__(key)

        return self

    def is_aot_element(self):  # type: () -> bool
        # True when this table is one element of an array of tables.
        return self._is_aot_element

    def is_super_table(self):  # type: () -> bool
        return self._is_super_table

    def as_string(self):  # type: () -> str
        return self._value.as_string()

    # Helpers

    def indent(self, indent):  # type: (int) -> Table
        # Indent the table header and every contained item.
        super(Table, self).indent(indent)

        m = re.match("(?s)^[^ ]*([ ]+).*$", self._trivia.indent)
        if not m:
            indent = ""
        else:
            indent = m.group(1)

        # NOTE: the loop variable shadows the module-level item() function
        # inside this method.
        for k, item in self._value.body:
            if not isinstance(item, Whitespace):
                item.trivia.indent = indent + item.trivia.indent

        return self

    def keys(self):  # type: () -> Generator[str]
        for k in self._value.keys():
            yield k

    def values(self):  # type: () -> Generator[Item]
        for v in self._value.values():
            yield v

    def items(self):  # type: () -> Generator[Item]
        for k, v in self._value.items():
            yield k, v

    def update(self, other):  # type: (Dict) -> None
        for k, v in other.items():
            self[k] = v

    def get(self, key, default=None):  # type: (Any, Optional[Any]) -> Any
        return self._value.get(key, default)

    def __contains__(self, key):  # type: (Union[Key, str]) -> bool
        return key in self._value

    def __getitem__(self, key):  # type: (Union[Key, str]) -> Item
        return self._value[key]

    def __setitem__(self, key, value):  # type: (Union[Key, str], Any) -> None
        if not isinstance(value, Item):
            value = item(value)

        self._value[key] = value

        if key is not None:
            super(Table, self).__setitem__(key, value)

        # Same indentation propagation as append().
        m = re.match("(?s)^[^ ]*([ ]+).*$", self._trivia.indent)
        if not m:
            return

        indent = m.group(1)

        if not isinstance(value, Whitespace):
            m = re.match("(?s)^([^ ]*)(.*)$", value.trivia.indent)
            if not m:
                value.trivia.indent = indent
            else:
                value.trivia.indent = m.group(1) + indent + m.group(2)

    def __delitem__(self, key):  # type: (Union[Key, str]) -> None
        self.remove(key)

    def __repr__(self):
        return super(Table, self).__repr__()

    def __str__(self):
        return str(self.value)

    def _getstate(self, protocol=3):
        # Constructor arguments for pickling.
        return (
            self._value,
            self._trivia,
            self._is_aot_element,
            self._is_super_table,
            self.name,
            self.display_name,
        )
class InlineTable(Item, dict):
    """
    An inline table literal.
    """

    def __init__(
        self, value, trivia, new=False
    ):  # type: (tomlkit.container.Container, Trivia, bool) -> None
        super(InlineTable, self).__init__(trivia)

        self._value = value
        # `new` marks a table created programmatically (as opposed to one
        # parsed from text); it affects spacing in as_string().
        self._new = new

        # Mirror the container's keyed entries into the dict base class.
        for k, v in self._value.body:
            if k is not None:
                super(InlineTable, self).__setitem__(k.key, v)

    @property
    def discriminant(self):  # type: () -> int
        return 10

    @property
    def value(self):  # type: () -> Dict
        return self._value

    def append(self, key, _item):  # type: (Union[Key, str], Any) -> InlineTable
        """
        Appends a (key, item) to the table.
        """
        if not isinstance(_item, Item):
            _item = item(_item)

        if not isinstance(_item, (Whitespace, Comment)):
            # Parsed tables get a space between entries; comments are not
            # representable inside an inline table, so they are dropped.
            if not _item.trivia.indent and len(self._value) > 0 and not self._new:
                _item.trivia.indent = " "
            if _item.trivia.comment:
                _item.trivia.comment = ""

        self._value.append(key, _item)

        if isinstance(key, Key):
            key = key.key

        if key is not None:
            super(InlineTable, self).__setitem__(key, _item)

        return self

    def remove(self, key):  # type: (Union[Key, str]) -> InlineTable
        # Remove from the container and the dict mirror; returns self.
        self._value.remove(key)

        if isinstance(key, Key):
            key = key.key

        if key is not None:
            super(InlineTable, self).__delitem__(key)

        return self

    def as_string(self):  # type: () -> str
        # Render "{ k = v, ... }" from the container body. Keyless entries
        # (trailing whitespace) close the table; NOTE rstrip(", ") strips
        # any run of ',' and ' ' characters, not the literal string.
        buf = "{"
        for i, (k, v) in enumerate(self._value.body):
            if k is None:
                if i == len(self._value.body) - 1:
                    if self._new:
                        buf = buf.rstrip(", ")
                    else:
                        buf = buf.rstrip(",")

                buf += v.as_string()

                continue

            buf += "{}{}{}{}{}{}".format(
                v.trivia.indent,
                k.as_string() + ("." if k.is_dotted() else ""),
                k.sep,
                v.as_string(),
                v.trivia.comment,
                v.trivia.trail.replace("\n", ""),
            )

            if i != len(self._value.body) - 1:
                buf += ","
                if self._new:
                    buf += " "

        buf += "}"

        return buf

    def keys(self):  # type: () -> Generator[str]
        for k in self._value.keys():
            yield k

    def values(self):  # type: () -> Generator[Item]
        for v in self._value.values():
            yield v

    def items(self):  # type: () -> Generator[Item]
        for k, v in self._value.items():
            yield k, v

    def update(self, other):  # type: (Dict) -> None
        for k, v in other.items():
            self[k] = v

    def get(self, key, default=None):  # type: (Any, Optional[Any]) -> Any
        return self._value.get(key, default)

    def __contains__(self, key):  # type: (Union[Key, str]) -> bool
        return key in self._value

    def __getitem__(self, key):  # type: (Union[Key, str]) -> Item
        return self._value[key]

    def __setitem__(self, key, value):  # type: (Union[Key, str], Any) -> None
        if not isinstance(value, Item):
            value = item(value)

        self._value[key] = value

        if key is not None:
            super(InlineTable, self).__setitem__(key, value)

        # Comments cannot live inside an inline table.
        if value.trivia.comment:
            value.trivia.comment = ""

        # Propagate this table's indentation, as Table.__setitem__ does.
        m = re.match("(?s)^[^ ]*([ ]+).*$", self._trivia.indent)
        if not m:
            return

        indent = m.group(1)

        if not isinstance(value, Whitespace):
            m = re.match("(?s)^([^ ]*)(.*)$", value.trivia.indent)
            if not m:
                value.trivia.indent = indent
            else:
                value.trivia.indent = m.group(1) + indent + m.group(2)

    def __delitem__(self, key):  # type: (Union[Key, str]) -> None
        self.remove(key)

    def __repr__(self):
        return super(InlineTable, self).__repr__()

    def _getstate(self, protocol=3):
        return (self._value, self._trivia)
class String(unicode, Item):
    """
    A string literal.

    Subclasses the text type, so instances behave as plain strings while
    also carrying their delimiter type, original (escaped) text and trivia.
    """

    def __new__(cls, t, value, original, trivia):
        return super(String, cls).__new__(cls, value)

    def __init__(
        self, t, _, original, trivia
    ):  # type: (StringType, str, original, Trivia) -> None
        super(String, self).__init__(trivia)

        self._t = t
        self._original = original

    @property
    def discriminant(self):  # type: () -> int
        return 11

    @property
    def value(self):  # type: () -> str
        return self

    def as_string(self):  # type: () -> str
        # Delimiter + original escaped text + delimiter.
        return "{}{}{}".format(self._t.value, decode(self._original), self._t.value)

    def __add__(self, other):
        result = super(String, self).__add__(other)

        return self._new(result)

    def __sub__(self, other):
        # NOTE(review): str defines no __sub__, so this super() call raises
        # AttributeError if ever invoked — looks like dead code; confirm.
        result = super(String, self).__sub__(other)

        return self._new(result)

    def _new(self, result):
        # The concatenated text is used for both value and original, so any
        # escaping in the original is not recomputed here.
        return String(self._t, result, result, self._trivia)

    def _getstate(self, protocol=3):
        return self._t, unicode(self), self._original, self._trivia
class AoT(Item, list):
    """
    An array of table literal
    """

    def __init__(
        self, body, name=None, parsed=False
    ):  # type: (List[Table], Optional[str], bool) -> None
        self.name = name
        self._body = []
        # `parsed` distinguishes tables read from text (indentation is kept
        # verbatim) from programmatically built ones.
        self._parsed = parsed

        super(AoT, self).__init__(Trivia(trail=""))

        for table in body:
            self.append(table)

    @property
    def body(self):  # type: () -> List[Table]
        return self._body

    @property
    def discriminant(self):  # type: () -> int
        return 12

    @property
    def value(self):  # type: () -> List[Dict[Any, Any]]
        return [v.value for v in self._body]

    def append(self, table):  # type: (Table) -> Table
        # Propagate the AoT's indentation onto the appended table.
        m = re.match("(?s)^[^ ]*([ ]+).*$", self._trivia.indent)
        if m:
            indent = m.group(1)

            m = re.match("(?s)^([^ ]*)(.*)$", table.trivia.indent)
            if not m:
                table.trivia.indent = indent
            else:
                table.trivia.indent = m.group(1) + indent + m.group(2)

        # Programmatically added tables after the first get a separating
        # blank line.
        if not self._parsed and "\n" not in table.trivia.indent and self._body:
            table.trivia.indent = "\n" + table.trivia.indent

        self._body.append(table)
        super(AoT, self).append(table)

        return table

    def as_string(self):  # type: () -> str
        b = ""
        for table in self._body:
            b += table.as_string()

        return b

    def __repr__(self):  # type: () -> str
        return "<AoT {}>".format(self.value)

    def _getstate(self, protocol=3):
        return self._body, self.name, self._parsed
class Null(Item):
    """
    A null item.
    """

    def __init__(self):  # type: () -> None
        # Deliberately skips Item.__init__: a null item has no trivia.
        pass

    @property
    def discriminant(self):  # type: () -> int
        return -1

    @property
    def value(self):  # type: () -> None
        return None

    def as_string(self):  # type: () -> str
        # Renders as nothing at all.
        return ""

    def _getstate(self, protocol=3):
        return tuple()
| |
from mock import patch
from django.core.urlresolvers import reverse
import requests
from seahub.test_utils import BaseTestCase
import datetime
class FileTest(BaseTestCase):
    """View tests for rendering/downloading library files of various types."""

    # (attribute name, fixture filename) for every file created in setUp.
    _TEST_FILES = (
        ('video', 'test.mp4'),
        ('audio', 'test.mp3'),
        ('image', 'test.jpg'),
        ('doc', 'test.doc'),
        ('open_doc', 'test.odt'),
        ('spreadsheet', 'test.xls'),
        ('pdf', 'test.pdf'),
        ('unsupported', 'test.xxxx'),
    )

    def setUp(self):
        self.login_as(self.user)
        for attr, filename in self._TEST_FILES:
            setattr(self, attr, self.create_file(repo_id=self.repo.id,
                                                 parent_dir='/',
                                                 filename=filename,
                                                 username=self.user.username))

    def tearDown(self):
        self.remove_repo()

    def _view_file(self, path):
        """GETs the library-file view for *path* in the test repo."""
        return self.client.get(reverse('view_lib_file',
                                       args=[self.repo.id, path]))

    def test_can_render(self):
        self.assertEqual(200, self._view_file(self.file).status_code)

    def test_can_download(self):
        dl_url = reverse('view_lib_file', args=[self.repo.id, self.file]) + '?dl=1'
        resp = self.client.get(dl_url)
        # The view redirects to the fileserver download URL.
        self.assertEqual(302, resp.status_code)
        assert '8082/files/' in resp.get('location')
        # Following the redirect must serve an attachment, not inline.
        resp = requests.request('GET', resp.get('location'))
        cont_disp = resp.headers['content-disposition']
        assert 'inline' not in cont_disp
        assert 'attachment' in cont_disp

    def test_can_render_video(self):
        self.assertEqual(200, self._view_file(self.video).status_code)

    def test_can_render_audio(self):
        self.assertEqual(200, self._view_file(self.audio).status_code)

    def test_can_render_image(self):
        self.assertEqual(200, self._view_file(self.image).status_code)

    def test_can_render_doc(self):
        self.assertEqual(200, self._view_file(self.doc).status_code)

    def test_can_render_open_doc(self):
        self.assertEqual(200, self._view_file(self.open_doc).status_code)

    def test_can_render_spreadsheet(self):
        self.assertEqual(200, self._view_file(self.spreadsheet).status_code)

    def test_can_render_pdf(self):
        self.assertEqual(200, self._view_file(self.pdf).status_code)

    def test_can_render_unsupported(self):
        self.assertEqual(200, self._view_file(self.unsupported).status_code)
class FileAccessLogTest(BaseTestCase):
def setUp(self):
    # Authenticate the test client and keep the default fixture file/repo
    # handy for the individual tests.
    self.login_as(self.user)
    self.file_path = self.file
    self.repo_id = self.repo.id
def tearDown(self):
    # Drop the repo created by the fixtures.
    self.remove_repo()
def generate_file_audit_event_type(self, e):
    """Maps an audit event to the (label, device) pair shown by the view.

    Used as the side_effect for the patched
    seahub.views.file.generate_file_audit_event_type mock. Raises
    KeyError for unknown event types.
    """
    etype_map = {
        'file-download-web': ('web', ''),
        'file-download-share-link': ('share-link', ''),
        'file-download-api': ('API', e.device),
        'repo-download-sync': ('download-sync', e.device),
        'repo-upload-sync': ('upload-sync', e.device),
    }
    return etype_map[e.etype]
@patch('seahub.views.file.is_pro_version')
def test_can_not_render_if_not_pro(self, mock_is_pro_version):
    # The file-access page is a pro-only feature; non-pro gets 404.
    mock_is_pro_version.return_value = False
    url = reverse('file_access', args=[self.repo_id]) + '?p=' + self.file_path
    resp = self.client.get(url)
    self.assertEqual(404, resp.status_code)
@patch('seahub.views.file.generate_file_audit_event_type')
@patch('seahub.views.file.get_file_audit_events_by_path')
@patch('seahub.views.file.is_pro_version')
@patch('seahub.views.file.FILE_AUDIT_ENABLED')
def test_can_show_web_type(self, mock_file_audit_enabled, mock_is_pro_version,
        mock_get_file_audit_events_by_path, mock_generate_file_audit_event_type):
    # NOTE(review): Event is not imported in the visible code — presumably
    # defined elsewhere in this module; confirm.
    etype = 'file-download-web'
    event = Event(self.user.email, self.repo_id, self.file_path, etype)
    mock_file_audit_enabled.return_value = True
    mock_is_pro_version.return_value = True
    mock_get_file_audit_events_by_path.return_value = [event]
    mock_generate_file_audit_event_type.side_effect = self.generate_file_audit_event_type
    url = reverse('file_access', args=[self.repo_id]) + '?p=' + self.file_path
    resp = self.client.get(url)
    # The access-log page renders and shows the 'web' event label.
    self.assertEqual(200, resp.status_code)
    self.assertTemplateUsed(resp, 'file_access.html')
    self.assertContains(resp, 'web')
@patch('seahub.views.file.generate_file_audit_event_type')
@patch('seahub.views.file.get_file_audit_events_by_path')
@patch('seahub.views.file.is_pro_version')
@patch('seahub.views.file.FILE_AUDIT_ENABLED')
def test_can_show_share_link_type(self, mock_file_audit_enabled, mock_is_pro_version,
mock_get_file_audit_events_by_path, mock_generate_file_audit_event_type):
etype = 'file-download-share-link'
event = Event(self.user.email, self.repo_id, self.file_path, etype)
mock_file_audit_enabled.return_value = True
mock_is_pro_version.return_value = True
mock_get_file_audit_events_by_path.return_value = [event]
mock_generate_file_audit_event_type.side_effect = self.generate_file_audit_event_type
url = reverse('file_access', args=[self.repo_id]) + '?p=' + self.file_path
resp = self.client.get(url)
self.assertEqual(200, resp.status_code)
self.assertTemplateUsed(resp, 'file_access.html')
self.assertContains(resp, 'share-link')
@patch('seahub.views.file.generate_file_audit_event_type')
@patch('seahub.views.file.get_file_audit_events_by_path')
@patch('seahub.views.file.is_pro_version')
@patch('seahub.views.file.FILE_AUDIT_ENABLED')
def test_can_show_api_type(self, mock_file_audit_enabled, mock_is_pro_version,
mock_get_file_audit_events_by_path, mock_generate_file_audit_event_type):
etype = 'file-download-api'
event = Event(self.user.email, self.repo_id, self.file_path, etype)
mock_file_audit_enabled.return_value = True
mock_is_pro_version.return_value = True
mock_get_file_audit_events_by_path.return_value = [event]
mock_generate_file_audit_event_type.side_effect = self.generate_file_audit_event_type
url = reverse('file_access', args=[self.repo_id]) + '?p=' + self.file_path
resp = self.client.get(url)
self.assertEqual(200, resp.status_code)
self.assertTemplateUsed(resp, 'file_access.html')
self.assertContains(resp, 'API')
@patch('seahub.views.file.generate_file_audit_event_type')
@patch('seahub.views.file.get_file_audit_events_by_path')
@patch('seahub.views.file.is_pro_version')
@patch('seahub.views.file.FILE_AUDIT_ENABLED')
def test_can_show_download_sync_type(self, mock_file_audit_enabled, mock_is_pro_version,
mock_get_file_audit_events_by_path, mock_generate_file_audit_event_type):
etype = 'repo-download-sync'
event = Event(self.user.email, self.repo_id, self.file_path, etype)
mock_file_audit_enabled.return_value = True
mock_is_pro_version.return_value = True
mock_get_file_audit_events_by_path.return_value = [event]
mock_generate_file_audit_event_type.side_effect = self.generate_file_audit_event_type
url = reverse('file_access', args=[self.repo_id]) + '?p=' + self.file_path
resp = self.client.get(url)
self.assertEqual(200, resp.status_code)
self.assertTemplateUsed(resp, 'file_access.html')
self.assertContains(resp, 'download-sync')
@patch('seahub.views.file.generate_file_audit_event_type')
@patch('seahub.views.file.get_file_audit_events_by_path')
@patch('seahub.views.file.is_pro_version')
@patch('seahub.views.file.FILE_AUDIT_ENABLED')
def test_can_show_upload_sync_type(self, mock_file_audit_enabled, mock_is_pro_version,
mock_get_file_audit_events_by_path, mock_generate_file_audit_event_type):
etype = 'repo-upload-sync'
event = Event(self.user.email, self.repo_id, self.file_path, etype)
mock_file_audit_enabled.return_value = True
mock_is_pro_version.return_value = True
mock_get_file_audit_events_by_path.return_value = [event]
mock_generate_file_audit_event_type.side_effect = self.generate_file_audit_event_type
url = reverse('file_access', args=[self.repo_id]) + '?p=' + self.file_path
resp = self.client.get(url)
self.assertEqual(200, resp.status_code)
self.assertTemplateUsed(resp, 'file_access.html')
self.assertContains(resp, 'upload-sync')
class Event(object):
    """Lightweight stand-in for a file-audit event record.

    Caller-supplied fields identify the event; the device / IP / org
    fields are fixed test fixtures.
    """

    def __init__(self, user, repo_id, file_path, etype):
        # Per-event fields supplied by the caller.
        self.user = user
        self.repo_id = repo_id
        self.file_path = file_path
        self.etype = etype
        # Fixed fixture values shared by every fake event.
        self.device = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.80 Safari/537.36'
        self.ip = '192.168.1.124'
        self.org_id = -1
        self.timestamp = datetime.datetime.now()
| |
# Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
# Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import datetime
import json
import mock
from webtest import app as webtest_app
from mistral.db.v2 import api as db_api
from mistral.db.v2.sqlalchemy import models
from mistral.engine import rpc
from mistral import exceptions as exc
from mistral.tests.unit.api import base
from mistral import utils
from mistral.workflow import states
# Canonical DB-model fixture: a complete workflow execution as stored by
# the DB layer (native dicts, datetime objects).
WF_EX = models.WorkflowExecution(
    id='123e4567-e89b-12d3-a456-426655440000',
    workflow_name='some',
    description='execution description.',
    spec={'name': 'some'},
    state=states.RUNNING,
    state_info=None,
    input={'foo': 'bar'},
    output={},
    params={'env': {'k1': 'abc'}},
    created_at=datetime.datetime(1970, 1, 1),
    updated_at=datetime.datetime(1970, 1, 1)
)

# REST representation of WF_EX: nested structures are JSON-encoded strings
# and timestamps are serialized to strings.  'description' is deliberately
# absent here; WF_EX_JSON_WITH_DESC adds it below.
WF_EX_JSON = {
    'id': '123e4567-e89b-12d3-a456-426655440000',
    'input': '{"foo": "bar"}',
    'output': '{}',
    'params': '{"env": {"k1": "abc"}}',
    'state': 'RUNNING',
    'state_info': None,
    'created_at': '1970-01-01 00:00:00',
    'updated_at': '1970-01-01 00:00:00',
    'workflow_name': 'some',
}

# PAUSED variants for the pause (PUT) tests.  NOTE: copy.copy() is
# shallow, so mutable members are shared with the originals; only the
# 'state' entry is rebound.
UPDATED_WF_EX = copy.copy(WF_EX)
UPDATED_WF_EX['state'] = states.PAUSED

UPDATED_WF_EX_JSON = copy.copy(WF_EX_JSON)
UPDATED_WF_EX_JSON['state'] = states.PAUSED

WF_EX_JSON_WITH_DESC = copy.copy(WF_EX_JSON)
WF_EX_JSON_WITH_DESC['description'] = "execution description."

# Pre-built mock return values / side effects used to patch db_api and
# the engine RPC client in the tests below.
MOCK_WF_EX = mock.MagicMock(return_value=WF_EX)
MOCK_WF_EXECUTIONS = mock.MagicMock(return_value=[WF_EX])
MOCK_UPDATED_WF_EX = mock.MagicMock(return_value=UPDATED_WF_EX)
MOCK_DELETE = mock.MagicMock(return_value=None)
MOCK_EMPTY = mock.MagicMock(return_value=[])
MOCK_NOT_FOUND = mock.MagicMock(side_effect=exc.NotFoundException())
MOCK_ACTION_EXC = mock.MagicMock(side_effect=exc.ActionException())
class TestExecutionsController(base.FunctionalTest):
    """REST API tests for /v2/executions (workflow executions).

    db_api and the engine RPC client are mocked throughout, so these tests
    exercise only serialization, validation and HTTP status codes.
    """

    @mock.patch.object(db_api, 'get_workflow_execution', MOCK_WF_EX)
    def test_get(self):
        resp = self.app.get('/v2/executions/123')

        self.maxDiff = None

        self.assertEqual(resp.status_int, 200)
        self.assertDictEqual(WF_EX_JSON_WITH_DESC, resp.json)

    @mock.patch.object(db_api, 'get_workflow_execution', MOCK_NOT_FOUND)
    def test_get_not_found(self):
        resp = self.app.get('/v2/executions/123', expect_errors=True)

        self.assertEqual(resp.status_int, 404)

    @mock.patch.object(
        db_api,
        'ensure_workflow_execution_exists',
        MOCK_WF_EX
    )
    @mock.patch.object(rpc.EngineClient, 'pause_workflow',
                       MOCK_UPDATED_WF_EX)
    def test_put(self):
        # PUT with state=PAUSED is routed to EngineClient.pause_workflow.
        resp = self.app.put_json('/v2/executions/123', UPDATED_WF_EX_JSON)

        UPDATED_WF_EX_WITH_DESC = copy.copy(UPDATED_WF_EX_JSON)
        UPDATED_WF_EX_WITH_DESC['description'] = 'execution description.'

        self.assertEqual(resp.status_int, 200)
        self.assertDictEqual(UPDATED_WF_EX_WITH_DESC, resp.json)

    @mock.patch.object(
        db_api,
        'ensure_workflow_execution_exists',
        MOCK_WF_EX
    )
    def test_put_stop(self):
        # PUT with state=ERROR is routed to EngineClient.stop_workflow,
        # forwarding the caller-supplied state_info.
        update_exec = copy.copy(WF_EX_JSON)
        update_exec['state'] = states.ERROR
        update_exec['state_info'] = "Force"

        with mock.patch.object(rpc.EngineClient, 'stop_workflow') as mock_pw:
            wf_ex = copy.copy(WF_EX)
            wf_ex['state'] = states.ERROR
            wf_ex['state_info'] = "Force"
            mock_pw.return_value = wf_ex

            resp = self.app.put_json('/v2/executions/123', update_exec)
            update_exec['description'] = "execution description."

            self.assertEqual(resp.status_int, 200)
            self.assertDictEqual(update_exec, resp.json)
            mock_pw.assert_called_once_with('123', 'ERROR', "Force")

    @mock.patch.object(
        db_api,
        'ensure_workflow_execution_exists',
        MOCK_WF_EX
    )
    def test_put_state_info_unset(self):
        # When state_info is omitted from the request it must default to
        # None in both the RPC call and the response body.
        update_exec = copy.copy(WF_EX_JSON)
        update_exec['state'] = states.ERROR
        update_exec.pop('state_info', None)

        with mock.patch.object(rpc.EngineClient, 'stop_workflow') as mock_pw:
            wf_ex = copy.copy(WF_EX)
            wf_ex['state'] = states.ERROR
            del wf_ex.state_info
            mock_pw.return_value = wf_ex

            resp = self.app.put_json('/v2/executions/123', update_exec)
            update_exec['description'] = 'execution description.'
            update_exec['state_info'] = None

            self.assertEqual(resp.status_int, 200)
            self.assertDictEqual(update_exec, resp.json)
            mock_pw.assert_called_once_with('123', 'ERROR', None)

    @mock.patch.object(db_api, 'update_workflow_execution', MOCK_NOT_FOUND)
    def test_put_not_found(self):
        resp = self.app.put_json(
            '/v2/executions/123',
            dict(state=states.PAUSED),
            expect_errors=True
        )

        self.assertEqual(resp.status_int, 404)

    def test_put_both_state_and_description(self):
        # Updating state and description in one request is rejected.
        self.assertRaises(
            webtest_app.AppError,
            self.app.put_json,
            '/v2/executions/123',
            WF_EX_JSON_WITH_DESC
        )

    @mock.patch('mistral.db.v2.api.ensure_workflow_execution_exists')
    @mock.patch('mistral.db.v2.api.update_workflow_execution',
                return_value=WF_EX)
    def test_put_description(self, mock_update, mock_ensure):
        update_params = {'description': 'execution description.'}

        resp = self.app.put_json('/v2/executions/123', update_params)

        self.assertEqual(resp.status_int, 200)
        mock_ensure.assert_called_once_with('123')
        mock_update.assert_called_once_with('123', update_params)

    @mock.patch.object(rpc.EngineClient, 'start_workflow')
    def test_post(self, f):
        f.return_value = WF_EX.to_dict()

        resp = self.app.post_json('/v2/executions', WF_EX_JSON_WITH_DESC)

        self.assertEqual(resp.status_int, 201)
        self.assertDictEqual(WF_EX_JSON_WITH_DESC, resp.json)

        # The JSON-encoded request fields must be decoded before being
        # handed to the engine.
        exec_dict = WF_EX_JSON_WITH_DESC

        f.assert_called_once_with(
            exec_dict['workflow_name'],
            json.loads(exec_dict['input']),
            exec_dict['description'],
            **json.loads(exec_dict['params'])
        )

    @mock.patch.object(rpc.EngineClient, 'start_workflow', MOCK_ACTION_EXC)
    def test_post_throws_exception(self):
        context = self.assertRaises(
            webtest_app.AppError,
            self.app.post_json,
            '/v2/executions',
            WF_EX_JSON
        )

        # str(exc) works on both Python 2 and 3, unlike the Python-2-only
        # '.message' attribute used previously.
        self.assertIn('Bad response: 400', str(context))

    @mock.patch.object(db_api, 'delete_workflow_execution', MOCK_DELETE)
    def test_delete(self):
        resp = self.app.delete('/v2/executions/123')

        self.assertEqual(resp.status_int, 204)

    @mock.patch.object(db_api, 'delete_workflow_execution', MOCK_NOT_FOUND)
    def test_delete_not_found(self):
        resp = self.app.delete('/v2/executions/123', expect_errors=True)

        self.assertEqual(resp.status_int, 404)

    @mock.patch.object(db_api, 'get_workflow_executions', MOCK_WF_EXECUTIONS)
    def test_get_all(self):
        resp = self.app.get('/v2/executions')

        self.assertEqual(resp.status_int, 200)
        self.assertEqual(len(resp.json['executions']), 1)
        self.assertDictEqual(WF_EX_JSON_WITH_DESC, resp.json['executions'][0])

    @mock.patch.object(db_api, 'get_workflow_executions', MOCK_EMPTY)
    def test_get_all_empty(self):
        resp = self.app.get('/v2/executions')

        self.assertEqual(resp.status_int, 200)
        self.assertEqual(len(resp.json['executions']), 0)

    @mock.patch.object(db_api, "get_workflow_executions", MOCK_WF_EXECUTIONS)
    def test_get_all_pagination(self):
        resp = self.app.get(
            '/v2/executions?limit=1&sort_keys=id,workflow_name'
            '&sort_dirs=asc,desc')

        self.assertEqual(resp.status_int, 200)
        self.assertIn('next', resp.json)
        self.assertEqual(len(resp.json['executions']), 1)
        self.assertDictEqual(WF_EX_JSON_WITH_DESC, resp.json['executions'][0])

        # The 'next' link must carry the pagination parameters forward,
        # with the marker pointing at the last returned execution.
        param_dict = utils.get_dict_from_string(
            resp.json['next'].split('?')[1],
            delimiter='&'
        )

        expected_dict = {
            'marker': '123e4567-e89b-12d3-a456-426655440000',
            'limit': 1,
            'sort_keys': 'id,workflow_name',
            'sort_dirs': 'asc,desc'
        }

        self.assertDictEqual(expected_dict, param_dict)

    def test_get_all_pagination_limit_negative(self):
        resp = self.app.get(
            '/v2/executions?limit=-1&sort_keys=id&sort_dirs=asc',
            expect_errors=True
        )

        self.assertEqual(resp.status_int, 400)
        self.assertIn("Limit must be positive", resp.body)

    def test_get_all_pagination_limit_not_integer(self):
        resp = self.app.get(
            '/v2/executions?limit=1.1&sort_keys=id&sort_dirs=asc',
            expect_errors=True
        )

        self.assertEqual(resp.status_int, 400)
        self.assertIn("unable to convert to int", resp.body)

    def test_get_all_pagination_invalid_sort_dirs_length(self):
        resp = self.app.get(
            '/v2/executions?limit=1&sort_keys=id&sort_dirs=asc,asc',
            expect_errors=True
        )

        self.assertEqual(resp.status_int, 400)
        self.assertIn(
            "Length of sort_keys must be equal or greater than sort_dirs",
            resp.body
        )

    def test_get_all_pagination_unknown_direction(self):
        # BUG FIX: this test previously requested '/v2/actions' -- a
        # copy/paste from the actions test module; this module tests the
        # executions endpoint.
        resp = self.app.get(
            '/v2/executions?limit=1&sort_keys=id&sort_dirs=nonexist',
            expect_errors=True
        )

        self.assertEqual(resp.status_int, 400)
        self.assertIn("Unknown sort direction", resp.body)
| |
#!/usr/bin/env python3
"""
Imports WMO data to an SQLite3 database
WMO data comes from http://webapp1.dlib.indiana.edu/virtual_disk_library/index.cgi/4296047/FID427/
By Jon Dehdari, 2016
Usage: python3 wmo_norms_db.py
Then: sqlite3 wmo_norms.db
An example website that uses this data is www.climate-charts.com
"""
import os
import sys
import argparse
import re
import lzma
import codecs
import sqlite3 as lite
def _open_wmo(path):
    """Open a WMO data file for text reading.

    '.xz' files are decompressed on the fly as UTF-8; anything else is
    assumed to be in the original ISO-8859 encoding.
    """
    _, suffix = os.path.splitext(path)
    if suffix == '.xz':
        return lzma.open(path, mode='rt', encoding='utf-8')
    return codecs.open(path, 'r', 'iso-8859-1')  # original file is iso-8859


def open_files(args, data):
    """Read all WMO input files named in *args* and append parsed rows to *data*.

    :param args: argparse.Namespace with file-path attributes
        climate_elem_codes, region_codes, stat_codes, allnorms, stnmeta.
    :param data: dict of lists keyed 'clim_elem_codes', 'region_codes',
        'statistic_codes', 'allnorms', 'station_meta'; tuples of stripped
        strings are appended in place.

    Changes from the original: raw strings for the regexes (avoids invalid
    escape-sequence warnings), 'with' blocks so file handles are closed
    even on parse errors, and the duplicated .xz-vs-plain open logic is
    factored into _open_wmo.
    """
    # Read climate element codes
    # Table 6 of wmo_norms/data/ALLNORMS.TXT
    # Code, Unit, Description
    with open(args.climate_elem_codes) as clim_elem_codes_file:
        for line in clim_elem_codes_file:
            data['clim_elem_codes'].append(tuple(line.rstrip().split('\t')))

    # Read region codes
    # Table 1 of wmo_norms/data/ALLNORMS.TXT
    # Code, Region
    with open(args.region_codes) as region_codes_file:
        for line in region_codes_file:
            data['region_codes'].append(tuple(line.rstrip().split('\t')))

    # Read statistic codes
    # Table 7 of wmo_norms/data/ALLNORMS.TXT
    # Code, Description
    with open(args.stat_codes) as statistic_codes_file:
        for line in statistic_codes_file:
            (code, desc) = line.rstrip().split('\t')
            # Eg. "Mean Daily Value" -> "mean_daily"
            desc_short = re.sub(r'_+value', '', re.sub(r'\s+', '_', desc.lower()))
            data['statistic_codes'].append((code, desc_short, desc))

    # Main weather data (allnorms)
    # File: wmo_norms/data/allnorms.dat.utf8.xz
    # Docs: wmo_norms/doc/allnorms.txt
    with _open_wmo(args.allnorms) as allnorms_file:
        for line in allnorms_file:
            region = line[0]
            country = line[1:3].strip()
            station = line[3:16].strip()
            clim_elem_code = line[26:28].strip()
            statistic_code = line[28:30].strip()
            # Twelve monthly values (Jan..Dec): 7-char fields separated by
            # one column, starting at column 37 (jan=[37:44], dec=[125:132]).
            months = [line[37 + 8 * m:44 + 8 * m].strip() for m in range(12)]
            annual_norms_reported = line[133:141].strip()
            annual_norms_computed = line[142:150].strip()
            data['allnorms'].append(
                (region, country, station, clim_elem_code, statistic_code) +
                tuple(months) +
                (annual_norms_reported, annual_norms_computed))

    # Station metadata
    # File: wmo_norms/data/stnmeta.dat.utf.xz
    # Docs: wmo_norms/doc/stnmeta.txt
    with _open_wmo(args.stnmeta) as stnmeta_file:
        for line in stnmeta_file:
            region = line[0]
            country = line[1:3].strip()
            station = line[3:16].strip()
            # Coordinates are recorded twice: once from the member ('mem')
            # submission and once from WMO ('wmo') records.
            lat_degs_mem = line[18:20].strip()
            lat_mins_mem = line[20:22].strip()
            lat_hem_mem = line[22:23].strip()
            lon_degs_mem = line[23:26].strip()
            lon_mins_mem = line[26:28].strip()
            lon_hem_mem = line[28:29].strip()
            elev_mem = line[29:35].strip()
            lat_degs_wmo = line[35:37].strip()
            lat_mins_wmo = line[37:39].strip()
            lat_hem_wmo = line[39:40].strip()
            lon_degs_wmo = line[40:43].strip()
            lon_mins_wmo = line[43:45].strip()
            lon_hem_wmo = line[45:46].strip()
            elev_wmo = line[46:50].strip()
            name = line[136:158].strip()
            country_name = line[158:208].title().strip()
            data['station_meta'].append((region, country, station, lat_degs_mem, lat_mins_mem, lat_hem_mem, lon_degs_mem, lon_mins_mem, lon_hem_mem, elev_mem, lat_degs_wmo, lat_mins_wmo, lat_hem_wmo, lon_degs_wmo, lon_mins_wmo, lon_hem_wmo, elev_wmo, name, country_name))
def split_allnorms(data):
    """
    Normalizes statistic_codes column of allnorms data to 5NF

    NOTE(review): unfinished stub -- it builds a dict and then only prints
    each allnorms record's statistic code.  Its only call site in main()
    is commented out.
    """
    # Initialize stat_codes dictionary
    # NOTE(review): the keys are the whole (code, desc_short, desc) tuples
    # rather than just the code -- confirm that is intended before
    # completing the implementation.
    statistic_codes = {}
    for code in data['statistic_codes']:
        statistic_codes[code] = []
    # Placeholder pass over the data: line[4] is the statistic_code field.
    for line in data['allnorms']:
        print(line[4])
def gen_db(args, data):
    """Create the SQLite database at args.db and bulk-load every table.

    Existing tables are dropped and recreated, so repeated runs are
    idempotent.  The connection is opened in autocommit mode
    (isolation_level=None); 'with con' wraps the work in a transaction.

    Fix over the original: the connection is now always closed -- the
    sqlite3 connection context manager only manages the transaction, it
    does not close the connection.
    """
    con = lite.connect(args.db, isolation_level=None)
    try:
        with con:
            cur = con.cursor()

            # Main climate normals: one row per station/element/statistic.
            cur.execute("DROP TABLE IF EXISTS `allnorms`")
            cur.execute("""
                CREATE TABLE `allnorms` (
                    `region` INTEGER NOT NULL,
                    `country` TEXT NOT NULL,
                    `station` TEXT NOT NULL,
                    `clim_elem_code` TEXT NOT NULL,
                    `statistic_code` TEXT,
                    `jan` REAL,
                    `feb` REAL,
                    `mar` REAL,
                    `apr` REAL,
                    `may` REAL,
                    `jun` REAL,
                    `jul` REAL,
                    `aug` REAL,
                    `sep` REAL,
                    `oct` REAL,
                    `nov` REAL,
                    `dec` REAL,
                    `annual_norms_reported` REAL,
                    `annual_norms_computed` REAL NOT NULL
                );
            """)
            cur.executemany("INSERT INTO `allnorms` VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", data['allnorms'])

            # Station metadata, with both member- and WMO-reported coords.
            cur.execute("DROP TABLE IF EXISTS `station_meta`")
            cur.execute("""
                CREATE TABLE `station_meta` (
                    `region` INTEGER NOT NULL,
                    `country` TEXT NOT NULL,
                    `station` TEXT NOT NULL,
                    `lat_degs_mem` INTEGER,
                    `lat_mins_mem` INTEGER,
                    `lat_hem_mem` TEXT,
                    `lon_degs_mem` INTEGER,
                    `lon_mins_mem` INTEGER,
                    `lon_hem_mem` TEXT,
                    `elev_mem` INTEGER,
                    `lat_degs_wmo` INTEGER,
                    `lat_mins_wmo` INTEGER,
                    `lat_hem_wmo` TEXT,
                    `lon_degs_wmo` INTEGER,
                    `lon_mins_wmo` INTEGER,
                    `lon_hem_wmo` TEXT,
                    `elev_wmo` INTEGER,
                    `name` TEXT NOT NULL,
                    `country_name` TEXT NOT NULL
                );
            """)
            cur.executemany("INSERT INTO `station_meta` VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", data['station_meta'])

            # Small code-lookup tables.
            cur.execute("DROP TABLE IF EXISTS `region_codes`")
            cur.execute("""
                CREATE TABLE `region_codes` (
                    `code` TEXT PRIMARY KEY,
                    `region` TEXT NOT NULL
                );
            """)
            cur.executemany("INSERT INTO `region_codes` VALUES(?,?)", data['region_codes'])

            cur.execute("DROP TABLE IF EXISTS `clim_elem_codes`")
            cur.execute("""
                CREATE TABLE `clim_elem_codes` (
                    `code` TEXT PRIMARY KEY,
                    `units` TEXT NOT NULL,
                    `desc` TEXT NOT NULL
                );
            """)
            cur.executemany("INSERT INTO `clim_elem_codes` VALUES(?,?,?)", data['clim_elem_codes'])

            cur.execute("DROP TABLE IF EXISTS `statistic_codes`")
            cur.execute("""
                CREATE TABLE `statistic_codes` (
                    `code` TEXT PRIMARY KEY,
                    `desc_short` TEXT NOT NULL,
                    `desc` TEXT NOT NULL
                );
            """)
            cur.executemany("INSERT INTO `statistic_codes` VALUES(?,?,?)", data['statistic_codes'])

            # Reclaim space freed by the DROPs above.
            cur.execute("VACUUM;")
    finally:
        con.close()
def main():
    """Parse command-line options, read the WMO input files, build the DB."""
    arg_parser = argparse.ArgumentParser(description='Builds weather database')
    # (flag, help text, default) triples for every file-path option.
    for flag, helptext, default in (
            ('--db', 'Specify Sqlite3 database output (default: %(default)s)',
             'wmo_norms.db'),
            ('--allnorms', 'Specify allnorms.dat input (default: %(default)s)',
             'wmo_norms_1961-1990/data/allnorms.dat.utf8.xz'),
            ('--stnmeta', 'Specify stnmeta.dat input (default: %(default)s)',
             'wmo_norms_1961-1990/data/stnmeta.dat.utf8.xz'),
            ('--climate_elem_codes', 'Specify climate_elem_code.tsv (default: %(default)s)',
             'wmo_norms_1961-1990/data/climate_elem_code.tsv'),
            ('--region_codes', 'Specify region_code.tsv (default: %(default)s)',
             'wmo_norms_1961-1990/data/region_code.tsv'),
            ('--stat_codes', 'Specify statistic_code.tsv (default: %(default)s)',
             'wmo_norms_1961-1990/data/statistic_code.tsv')):
        arg_parser.add_argument(flag, help=helptext, type=str, default=default)
    args = arg_parser.parse_args()

    data = {key: [] for key in ('allnorms', 'station_meta', 'clim_elem_codes',
                                'region_codes', 'statistic_codes')}
    open_files(args, data)
    #split_allnorms(data) # normalizes statistic_codes column of allnorms data to 5NF
    gen_db(args, data)


if __name__ == '__main__':
    main()
| |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""SQLAlchemy storage backend."""
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import session as db_session
from oslo_db.sqlalchemy import utils as db_utils
from oslo_utils import timeutils
from sqlalchemy.orm.exc import MultipleResultsFound
from sqlalchemy.orm.exc import NoResultFound
from magnum.common import exception
from magnum.common import utils
from magnum.db import api
from magnum.db.sqlalchemy import models
from magnum.i18n import _
# Global oslo.config handle used when building the engine facade.
CONF = cfg.CONF

# Module-level EngineFacade singleton, created on first use.
_FACADE = None


def _create_facade_lazily():
    # Create (once) and return the shared EngineFacade.
    # NOTE(review): no locking here -- two threads racing on first call
    # could each build a facade; presumably acceptable for this service.
    global _FACADE

    if _FACADE is None:
        _FACADE = db_session.EngineFacade.from_config(CONF)
    return _FACADE
def get_engine():
    """Return the SQLAlchemy engine from the shared facade."""
    return _create_facade_lazily().get_engine()
def get_session(**kwargs):
    """Return a session from the shared facade; kwargs pass through."""
    return _create_facade_lazily().get_session(**kwargs)
def get_backend():
    """The backend is this module itself; return its Connection object."""
    backend = Connection()
    return backend
def model_query(model, *args, **kwargs):
    """Query helper for simpler session usage.

    :param session: if present, the session to use
    """
    # Fall back to a fresh session when the caller did not provide one.
    sess = kwargs.get('session') or get_session()
    return sess.query(model, *args)
def add_identity_filter(query, value):
    """Add an identity filter to a query.

    Filters by integer ID when *value* looks like an int, by UUID when it
    looks like a UUID.

    :param query: Initial query to add filter to.
    :param value: Value for filtering results by.
    :return: Modified query.
    :raises: exception.InvalidIdentity when *value* is neither.
    """
    if utils.is_int_like(value):
        return query.filter_by(id=value)
    if utils.is_uuid_like(value):
        return query.filter_by(uuid=value)
    raise exception.InvalidIdentity(identity=value)
def _paginate_query(model, limit=None, marker=None, sort_key=None,
                    sort_dir=None, query=None):
    # Apply limit/marker pagination and sorting to *query* (or to a fresh
    # query over *model*) and return the resulting rows as a list.
    if not query:
        query = model_query(model)
    # 'id' is always appended as a tie-breaker; a caller-supplied sort_key
    # takes precedence when it differs.
    sort_keys = ['id']
    if sort_key and sort_key not in sort_keys:
        sort_keys.insert(0, sort_key)
    try:
        query = db_utils.paginate_query(query, model, limit, sort_keys,
                                        marker=marker, sort_dir=sort_dir)
    except db_exc.InvalidSortKey:
        # Translate the oslo.db error into the API-level exception type.
        raise exception.InvalidParameterValue(
            _('The sort_key value "%(key)s" is an invalid field for sorting')
            % {'key': sort_key})
    return query.all()
class Connection(api.Connection):
    """SqlAlchemy connection."""

    def __init__(self):
        # No per-connection state: engine/session management is handled by
        # the module-level facade helpers (get_session/get_engine).
        pass
def _add_tenant_filters(self, context, query):
if context.is_admin and context.all_tenants:
return query
if context.project_id:
query = query.filter_by(project_id=context.project_id)
else:
query = query.filter_by(user_id=context.user_id)
return query
def _add_bays_filters(self, query, filters):
if filters is None:
filters = {}
possible_filters = ["baymodel_id", "name", "node_count",
"master_count", "stack_id", "api_address",
"node_addresses", "project_id", "user_id"]
filter_names = set(filters).intersection(possible_filters)
filter_dict = {filter_name: filters[filter_name]
for filter_name in filter_names}
query = query.filter_by(**filter_dict)
if 'status' in filters:
query = query.filter(models.Bay.status.in_(filters['status']))
return query
def get_bay_list(self, context, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.Bay)
query = self._add_tenant_filters(context, query)
query = self._add_bays_filters(query, filters)
return _paginate_query(models.Bay, limit, marker,
sort_key, sort_dir, query)
def create_bay(self, values):
# ensure defaults are present for new bays
if not values.get('uuid'):
values['uuid'] = utils.generate_uuid()
bay = models.Bay()
bay.update(values)
try:
bay.save()
except db_exc.DBDuplicateEntry:
raise exception.BayAlreadyExists(uuid=values['uuid'])
return bay
def get_bay_by_id(self, context, bay_id):
query = model_query(models.Bay)
query = self._add_tenant_filters(context, query)
query = query.filter_by(id=bay_id)
try:
return query.one()
except NoResultFound:
raise exception.BayNotFound(bay=bay_id)
def get_bay_by_name(self, context, bay_name):
query = model_query(models.Bay)
query = self._add_tenant_filters(context, query)
query = query.filter_by(name=bay_name)
try:
return query.one()
except MultipleResultsFound:
raise exception.Conflict('Multiple bays exist with same name.'
' Please use the bay uuid instead.')
except NoResultFound:
raise exception.BayNotFound(bay=bay_name)
def get_bay_by_uuid(self, context, bay_uuid):
query = model_query(models.Bay)
query = self._add_tenant_filters(context, query)
query = query.filter_by(uuid=bay_uuid)
try:
return query.one()
except NoResultFound:
raise exception.BayNotFound(bay=bay_uuid)
def destroy_bay(self, bay_id):
def destroy_bay_resources(session, bay_uuid):
"""Checks whether the bay does not have resources."""
query = model_query(models.Pod, session=session)
query = self._add_pods_filters(query, {'bay_uuid': bay_uuid})
if query.count() != 0:
query.delete()
query = model_query(models.Service, session=session)
query = self._add_services_filters(query, {'bay_uuid': bay_uuid})
if query.count() != 0:
query.delete()
query = model_query(models.ReplicationController, session=session)
query = self._add_rcs_filters(query, {'bay_uuid': bay_uuid})
if query.count() != 0:
query.delete()
query = model_query(models.Container, session=session)
query = self._add_containers_filters(query, {'bay_uuid': bay_uuid})
if query.count() != 0:
query.delete()
session = get_session()
with session.begin():
query = model_query(models.Bay, session=session)
query = add_identity_filter(query, bay_id)
try:
bay_ref = query.one()
except NoResultFound:
raise exception.BayNotFound(bay=bay_id)
destroy_bay_resources(session, bay_ref['uuid'])
query.delete()
def update_bay(self, bay_id, values):
# NOTE(dtantsur): this can lead to very strange errors
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing Bay.")
raise exception.InvalidParameterValue(err=msg)
return self._do_update_bay(bay_id, values)
    def _do_update_bay(self, bay_id, values):
        # Apply *values* to the bay identified by *bay_id* inside one
        # transaction, taking a row lock so concurrent updates serialize.
        session = get_session()
        with session.begin():
            query = model_query(models.Bay, session=session)
            query = add_identity_filter(query, bay_id)
            try:
                # NOTE(review): with_lockmode('update') (SELECT ... FOR
                # UPDATE) is the legacy SQLAlchemy API; newer releases use
                # with_for_update() -- confirm before upgrading.
                ref = query.with_lockmode('update').one()
            except NoResultFound:
                raise exception.BayNotFound(bay=bay_id)
            # Track when the provisioning state last changed.
            if 'provision_state' in values:
                values['provision_updated_at'] = timeutils.utcnow()
            ref.update(values)
        return ref
def _add_baymodels_filters(self, query, filters):
if filters is None:
filters = {}
possible_filters = ["name", "image_id", "flavor_id",
"master_flavor_id", "keypair_id",
"external_network_id", "dns_nameserver",
"project_id", "user_id", "labels"]
filter_names = set(filters).intersection(possible_filters)
filter_dict = {filter_name: filters[filter_name]
for filter_name in filter_names}
return query.filter_by(**filter_dict)
def get_baymodel_list(self, context, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.BayModel)
query = self._add_tenant_filters(context, query)
query = self._add_baymodels_filters(query, filters)
# include public baymodels
public_q = model_query(models.BayModel).filter_by(public=True)
query = query.union(public_q)
return _paginate_query(models.BayModel, limit, marker,
sort_key, sort_dir, query)
def create_baymodel(self, values):
# ensure defaults are present for new baymodels
if not values.get('uuid'):
values['uuid'] = utils.generate_uuid()
baymodel = models.BayModel()
baymodel.update(values)
try:
baymodel.save()
except db_exc.DBDuplicateEntry:
raise exception.BayModelAlreadyExists(uuid=values['uuid'])
return baymodel
def get_baymodel_by_id(self, context, baymodel_id):
query = model_query(models.BayModel)
query = self._add_tenant_filters(context, query)
public_q = model_query(models.BayModel).filter_by(public=True)
query = query.union(public_q)
query = query.filter_by(id=baymodel_id)
try:
return query.one()
except NoResultFound:
raise exception.BayModelNotFound(baymodel=baymodel_id)
def get_baymodel_by_uuid(self, context, baymodel_uuid):
query = model_query(models.BayModel)
query = self._add_tenant_filters(context, query)
public_q = model_query(models.BayModel).filter_by(public=True)
query = query.union(public_q)
query = query.filter_by(uuid=baymodel_uuid)
try:
return query.one()
except NoResultFound:
raise exception.BayModelNotFound(baymodel=baymodel_uuid)
def get_baymodel_by_name(self, context, baymodel_name):
query = model_query(models.BayModel)
query = self._add_tenant_filters(context, query)
public_q = model_query(models.BayModel).filter_by(public=True)
query = query.union(public_q)
query = query.filter_by(name=baymodel_name)
try:
return query.one()
except MultipleResultsFound:
raise exception.Conflict('Multiple baymodels exist with same name.'
' Please use the baymodel uuid instead.')
except NoResultFound:
raise exception.BayModelNotFound(baymodel=baymodel_name)
def _is_baymodel_referenced(self, session, baymodel_uuid):
"""Checks whether the baymodel is referenced by bay(s)."""
query = model_query(models.Bay, session=session)
query = self._add_bays_filters(query, {'baymodel_id': baymodel_uuid})
return query.count() != 0
def _is_publishing_baymodel(self, values):
if (len(values) == 1 and
'public' in values and values['public'] is True):
return True
return False
def destroy_baymodel(self, baymodel_id):
session = get_session()
with session.begin():
query = model_query(models.BayModel, session=session)
query = add_identity_filter(query, baymodel_id)
try:
baymodel_ref = query.one()
except NoResultFound:
raise exception.BayModelNotFound(baymodel=baymodel_id)
if self._is_baymodel_referenced(session, baymodel_ref['uuid']):
raise exception.BayModelReferenced(baymodel=baymodel_id)
query.delete()
def update_baymodel(self, baymodel_id, values):
# NOTE(dtantsur): this can lead to very strange errors
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing BayModel.")
raise exception.InvalidParameterValue(err=msg)
return self._do_update_baymodel(baymodel_id, values)
    def _do_update_baymodel(self, baymodel_id, values):
        # Apply *values* to a baymodel row under a row lock.  A baymodel
        # still referenced by bays may only be updated when the update does
        # nothing but publish it (values == {'public': True}).
        session = get_session()
        with session.begin():
            query = model_query(models.BayModel, session=session)
            query = add_identity_filter(query, baymodel_id)
            try:
                # NOTE(review): with_lockmode() is the legacy SQLAlchemy
                # row-lock API; newer releases use with_for_update().
                ref = query.with_lockmode('update').one()
            except NoResultFound:
                raise exception.BayModelNotFound(baymodel=baymodel_id)
            if self._is_baymodel_referenced(session, ref['uuid']):
                # we only allow to update baymodel to be public
                if not self._is_publishing_baymodel(values):
                    raise exception.BayModelReferenced(baymodel=baymodel_id)
            ref.update(values)
        return ref
def _add_containers_filters(self, query, filters):
if filters is None:
filters = {}
filter_names = ['name', 'image', 'project_id', 'user_id',
'memory', 'bay_uuid']
for name in filter_names:
if name in filters:
query = query.filter_by(**{name: filters[name]})
return query
def get_container_list(self, context, filters=None, limit=None,
                       marker=None, sort_key=None, sort_dir=None):
    """Return a paginated, tenant-scoped list of containers."""
    query = model_query(models.Container)
    query = self._add_tenant_filters(context, query)
    query = self._add_containers_filters(query, filters)
    return _paginate_query(models.Container, limit, marker,
                           sort_key, sort_dir, query)
def create_container(self, values):
    """Create a new container row from *values*.

    A random UUID is generated when the caller did not supply one.

    :raises: ContainerAlreadyExists on a duplicate UUID.
    """
    # ensure defaults are present for new containers
    if not values.get('uuid'):
        values['uuid'] = utils.generate_uuid()
    container = models.Container()
    container.update(values)
    try:
        container.save()
    except db_exc.DBDuplicateEntry:
        raise exception.ContainerAlreadyExists(uuid=values['uuid'])
    return container
def get_container_by_id(self, context, container_id):
    """Return the tenant-visible container with database id *container_id*.

    :raises: ContainerNotFound if no matching row exists.
    """
    scoped = self._add_tenant_filters(context, model_query(models.Container))
    try:
        return scoped.filter_by(id=container_id).one()
    except NoResultFound:
        raise exception.ContainerNotFound(container=container_id)
def get_container_by_uuid(self, context, container_uuid):
    """Return the tenant-visible container with UUID *container_uuid*.

    :raises: ContainerNotFound if no matching row exists.
    """
    scoped = self._add_tenant_filters(context, model_query(models.Container))
    try:
        return scoped.filter_by(uuid=container_uuid).one()
    except NoResultFound:
        raise exception.ContainerNotFound(container=container_uuid)
def get_container_by_name(self, context, container_name):
    """Return the single tenant-visible container named *container_name*.

    :raises: ContainerNotFound if absent.
    :raises: Conflict if the name is ambiguous.
    """
    query = model_query(models.Container)
    query = self._add_tenant_filters(context, query)
    query = query.filter_by(name=container_name)
    try:
        return query.one()
    except NoResultFound:
        raise exception.ContainerNotFound(container=container_name)
    except MultipleResultsFound:
        raise exception.Conflict('Multiple containers exist with same '
                                 'name. Please use the container uuid '
                                 'instead.')
def destroy_container(self, container_id):
    """Delete the container matching *container_id* (id or uuid).

    :raises: ContainerNotFound if no single row was deleted.
    """
    session = get_session()
    with session.begin():
        query = model_query(models.Container, session=session)
        query = add_identity_filter(query, container_id)
        count = query.delete()
        if count != 1:
            # Pass the id as a keyword (as every other ContainerNotFound
            # raise in this module does) so the exception's message
            # template substitutes %(container)s correctly; a positional
            # argument is not picked up by the kwargs-based formatter.
            raise exception.ContainerNotFound(container=container_id)
def update_container(self, container_id, values):
    """Update an existing container; the UUID itself is immutable."""
    # NOTE(dtantsur): this can lead to very strange errors
    if 'uuid' in values:
        raise exception.InvalidParameterValue(
            err=_("Cannot overwrite UUID for an existing Container."))
    return self._do_update_container(container_id, values)
def _do_update_container(self, container_id, values):
    """Apply *values* to a container row under a row lock.

    :raises: ContainerNotFound if no row matches *container_id*.
    """
    session = get_session()
    with session.begin():
        query = model_query(models.Container, session=session)
        query = add_identity_filter(query, container_id)
        try:
            # SELECT ... FOR UPDATE so concurrent updates serialize.
            ref = query.with_lockmode('update').one()
        except NoResultFound:
            raise exception.ContainerNotFound(container=container_id)
        if 'provision_state' in values:
            # Track when the provisioning state last changed.
            values['provision_updated_at'] = timeutils.utcnow()
        ref.update(values)
    return ref
def _add_pods_filters(self, query, filters):
if filters is None:
filters = {}
if 'bay_uuid' in filters:
query = query.filter_by(bay_uuid=filters['bay_uuid'])
if 'name' in filters:
query = query.filter_by(name=filters['name'])
if 'status' in filters:
query = query.filter_by(status=filters['status'])
return query
def get_pod_list(self, context, filters=None, limit=None, marker=None,
                 sort_key=None, sort_dir=None):
    """Return a paginated, tenant-scoped list of pods."""
    query = model_query(models.Pod)
    query = self._add_tenant_filters(context, query)
    query = self._add_pods_filters(query, filters)
    return _paginate_query(models.Pod, limit, marker,
                           sort_key, sort_dir, query)
def create_pod(self, values):
    """Create a new pod row from *values*.

    A random UUID is generated when the caller did not supply one.

    :raises: PodAlreadyExists on a duplicate UUID.
    """
    # ensure defaults are present for new pods
    if not values.get('uuid'):
        values['uuid'] = utils.generate_uuid()
    pod = models.Pod()
    pod.update(values)
    try:
        pod.save()
    except db_exc.DBDuplicateEntry:
        raise exception.PodAlreadyExists(uuid=values['uuid'])
    return pod
def get_pod_by_id(self, context, pod_id):
    """Return the tenant-visible pod with database id *pod_id*.

    :raises: PodNotFound if no matching row exists.
    """
    query = model_query(models.Pod)
    query = self._add_tenant_filters(context, query)
    query = query.filter_by(id=pod_id)
    try:
        return query.one()
    except NoResultFound:
        raise exception.PodNotFound(pod=pod_id)
def get_pod_by_uuid(self, context, pod_uuid):
    """Return the tenant-visible pod with UUID *pod_uuid*.

    :raises: PodNotFound if no matching row exists.
    """
    query = model_query(models.Pod)
    query = self._add_tenant_filters(context, query)
    query = query.filter_by(uuid=pod_uuid)
    try:
        return query.one()
    except NoResultFound:
        raise exception.PodNotFound(pod=pod_uuid)
def get_pod_by_name(self, pod_name):
    """Return the single pod named *pod_name*.

    NOTE(review): unlike the other getters this one takes no context and
    applies no tenant filter — the lookup is global.

    :raises: Conflict if the name is ambiguous.
    :raises: PodNotFound if absent.
    """
    query = model_query(models.Pod).filter_by(name=pod_name)
    try:
        return query.one()
    except MultipleResultsFound:
        raise exception.Conflict('Multiple pods exist with same name.'
                                 ' Please use the pod uuid instead.')
    except NoResultFound:
        raise exception.PodNotFound(pod=pod_name)
def destroy_pod(self, pod_id):
    """Delete the pod matching *pod_id* (id or uuid).

    :raises: PodNotFound if no single row was deleted.
    """
    session = get_session()
    with session.begin():
        query = model_query(models.Pod, session=session)
        query = add_identity_filter(query, pod_id)
        count = query.delete()
        if count != 1:
            # Keyword argument (as in every other PodNotFound raise in
            # this module) so the message template's %(pod)s is filled;
            # a positional argument is ignored by the kwargs formatter.
            raise exception.PodNotFound(pod=pod_id)
def update_pod(self, pod_id, values):
    """Update an existing pod; the UUID itself is immutable."""
    # NOTE(dtantsur): this can lead to very strange errors
    if 'uuid' in values:
        msg = _("Cannot overwrite UUID for an existing Pod.")
        raise exception.InvalidParameterValue(err=msg)
    return self._do_update_pod(pod_id, values)
def _do_update_pod(self, pod_id, values):
    """Apply *values* to a pod row under a row lock.

    :raises: PodNotFound if no row matches *pod_id*.
    """
    session = get_session()
    with session.begin():
        query = model_query(models.Pod, session=session)
        query = add_identity_filter(query, pod_id)
        try:
            # SELECT ... FOR UPDATE so concurrent updates serialize.
            ref = query.with_lockmode('update').one()
        except NoResultFound:
            raise exception.PodNotFound(pod=pod_id)
        if 'provision_state' in values:
            # Track when the provisioning state last changed.
            values['provision_updated_at'] = timeutils.utcnow()
        ref.update(values)
    return ref
def _add_services_filters(self, query, filters):
if filters is None:
filters = {}
if 'bay_uuid' in filters:
query = query.filter_by(bay_uuid=filters['bay_uuid'])
if 'name' in filters:
query = query.filter_by(name=filters['name'])
if 'ip' in filters:
query = query.filter_by(ip=filters['ip'])
if 'ports' in filters:
query = query.filter_by(ports=filters['ports'])
return query
def get_service_list(self, context, filters=None, limit=None, marker=None,
                     sort_key=None, sort_dir=None):
    """Return a paginated, tenant-scoped list of services."""
    query = model_query(models.Service)
    query = self._add_tenant_filters(context, query)
    query = self._add_services_filters(query, filters)
    return _paginate_query(models.Service, limit, marker,
                           sort_key, sort_dir, query)
def create_service(self, values):
    """Create a new service row from *values*.

    A random UUID is generated when the caller did not supply one.

    :raises: ServiceAlreadyExists on a duplicate UUID.
    """
    # ensure defaults are present for new services
    if not values.get('uuid'):
        values['uuid'] = utils.generate_uuid()
    service = models.Service()
    service.update(values)
    try:
        service.save()
    except db_exc.DBDuplicateEntry:
        raise exception.ServiceAlreadyExists(uuid=values['uuid'])
    return service
def get_service_by_id(self, context, service_id):
    """Return the tenant-visible service with database id *service_id*.

    :raises: ServiceNotFound if no matching row exists.
    """
    query = model_query(models.Service)
    query = self._add_tenant_filters(context, query)
    query = query.filter_by(id=service_id)
    try:
        return query.one()
    except NoResultFound:
        raise exception.ServiceNotFound(service=service_id)
def get_service_by_uuid(self, context, service_uuid):
    """Return the tenant-visible service with UUID *service_uuid*.

    :raises: ServiceNotFound if no matching row exists.
    """
    query = model_query(models.Service)
    query = self._add_tenant_filters(context, query)
    query = query.filter_by(uuid=service_uuid)
    try:
        return query.one()
    except NoResultFound:
        raise exception.ServiceNotFound(service=service_uuid)
def get_service_by_name(self, context, service_name):
    """Return the single tenant-visible service named *service_name*.

    :raises: Conflict if the name is ambiguous.
    :raises: ServiceNotFound if absent.
    """
    query = model_query(models.Service)
    query = self._add_tenant_filters(context, query)
    query = query.filter_by(name=service_name)
    try:
        return query.one()
    except MultipleResultsFound:
        raise exception.Conflict('Multiple services exist with same name.'
                                 ' Please use the service uuid instead.')
    except NoResultFound:
        raise exception.ServiceNotFound(service=service_name)
def destroy_service(self, service_id):
    """Delete the service matching *service_id* (id or uuid).

    :raises: ServiceNotFound if no single row was deleted.
    """
    session = get_session()
    with session.begin():
        query = model_query(models.Service, session=session)
        query = add_identity_filter(query, service_id)
        count = query.delete()
        if count != 1:
            # Keyword argument (matching the other ServiceNotFound raises
            # in this module) so the message template's %(service)s is
            # substituted; a positional argument would be dropped.
            raise exception.ServiceNotFound(service=service_id)
def update_service(self, service_id, values):
    """Update an existing service; the UUID itself is immutable."""
    # NOTE(dtantsur): this can lead to very strange errors
    if 'uuid' in values:
        msg = _("Cannot overwrite UUID for an existing Service.")
        raise exception.InvalidParameterValue(err=msg)
    return self._do_update_service(service_id, values)
def _do_update_service(self, service_id, values):
    """Apply *values* to a service row under a row lock.

    :raises: ServiceNotFound if no row matches *service_id*.
    """
    session = get_session()
    with session.begin():
        query = model_query(models.Service, session=session)
        query = add_identity_filter(query, service_id)
        try:
            # SELECT ... FOR UPDATE so concurrent updates serialize.
            ref = query.with_lockmode('update').one()
        except NoResultFound:
            raise exception.ServiceNotFound(service=service_id)
        if 'provision_state' in values:
            # Track when the provisioning state last changed.
            values['provision_updated_at'] = timeutils.utcnow()
        ref.update(values)
    return ref
def _add_rcs_filters(self, query, filters):
if filters is None:
filters = {}
if 'bay_uuid' in filters:
query = query.filter_by(bay_uuid=filters['bay_uuid'])
if 'name' in filters:
query = query.filter_by(name=filters['name'])
if 'replicas' in filters:
query = query.filter_by(replicas=filters['replicas'])
return query
def get_rc_list(self, context, filters=None, limit=None, marker=None,
                sort_key=None, sort_dir=None):
    """Return a paginated, tenant-scoped list of replication controllers."""
    query = model_query(models.ReplicationController)
    query = self._add_tenant_filters(context, query)
    query = self._add_rcs_filters(query, filters)
    return _paginate_query(models.ReplicationController, limit, marker,
                           sort_key, sort_dir, query)
def create_rc(self, values):
    """Create a new replication-controller row from *values*.

    A random UUID is generated when the caller did not supply one.

    :raises: ReplicationControllerAlreadyExists on a duplicate UUID.
    """
    # ensure defaults are present for new ReplicationController
    if not values.get('uuid'):
        values['uuid'] = utils.generate_uuid()
    rc = models.ReplicationController()
    rc.update(values)
    try:
        rc.save()
    except db_exc.DBDuplicateEntry:
        raise exception.ReplicationControllerAlreadyExists(
            uuid=values['uuid'])
    return rc
def get_rc_by_id(self, context, rc_id):
    """Return the tenant-visible replication controller with id *rc_id*.

    :raises: ReplicationControllerNotFound if no matching row exists.
    """
    query = model_query(models.ReplicationController)
    query = self._add_tenant_filters(context, query)
    query = query.filter_by(id=rc_id)
    try:
        return query.one()
    except NoResultFound:
        raise exception.ReplicationControllerNotFound(rc=rc_id)
def get_rc_by_uuid(self, context, rc_uuid):
    """Return the tenant-visible replication controller with UUID *rc_uuid*.

    :raises: ReplicationControllerNotFound if no matching row exists.
    """
    query = model_query(models.ReplicationController)
    query = self._add_tenant_filters(context, query)
    query = query.filter_by(uuid=rc_uuid)
    try:
        return query.one()
    except NoResultFound:
        raise exception.ReplicationControllerNotFound(rc=rc_uuid)
def get_rc_by_name(self, context, rc_name):
    """Return the single tenant-visible replication controller *rc_name*.

    :raises: Conflict if the name is ambiguous.
    :raises: ReplicationControllerNotFound if absent.
    """
    query = model_query(models.ReplicationController)
    query = self._add_tenant_filters(context, query)
    query = query.filter_by(name=rc_name)
    try:
        return query.one()
    except MultipleResultsFound:
        raise exception.Conflict('Multiple rcs exist with same name.'
                                 ' Please use the rc uuid instead.')
    except NoResultFound:
        raise exception.ReplicationControllerNotFound(rc=rc_name)
def destroy_rc(self, rc_id):
    """Delete the replication controller matching *rc_id* (id or uuid).

    :raises: ReplicationControllerNotFound if no single row was deleted.
    """
    session = get_session()
    with session.begin():
        query = model_query(models.ReplicationController, session=session)
        query = add_identity_filter(query, rc_id)
        count = query.delete()
        if count != 1:
            # Keyword argument (matching the other raises in this module)
            # so the message template's %(rc)s is substituted; a
            # positional argument would be dropped by the formatter.
            raise exception.ReplicationControllerNotFound(rc=rc_id)
def update_rc(self, rc_id, values):
    """Update an existing replication controller; the UUID is immutable."""
    if 'uuid' in values:
        msg = _("Cannot overwrite UUID for an existing rc.")
        raise exception.InvalidParameterValue(err=msg)
    return self._do_update_rc(rc_id, values)
def _do_update_rc(self, rc_id, values):
    """Apply *values* to a replication-controller row under a row lock.

    :raises: ReplicationControllerNotFound if no row matches *rc_id*.
    """
    session = get_session()
    with session.begin():
        query = model_query(models.ReplicationController, session=session)
        query = add_identity_filter(query, rc_id)
        try:
            # SELECT ... FOR UPDATE so concurrent updates serialize.
            ref = query.with_lockmode('update').one()
        except NoResultFound:
            raise exception.ReplicationControllerNotFound(rc=rc_id)
        ref.update(values)
    return ref
def create_x509keypair(self, values):
    """Create a new X509 keypair row from *values*.

    A random UUID is generated when the caller did not supply one.

    :raises: X509KeyPairAlreadyExists on a duplicate UUID.
    """
    # ensure defaults are present for new x509keypairs
    if not values.get('uuid'):
        values['uuid'] = utils.generate_uuid()
    x509keypair = models.X509KeyPair()
    x509keypair.update(values)
    try:
        x509keypair.save()
    except db_exc.DBDuplicateEntry:
        raise exception.X509KeyPairAlreadyExists(uuid=values['uuid'])
    return x509keypair
def get_x509keypair_by_id(self, context, x509keypair_id):
    """Return the tenant-visible keypair with database id *x509keypair_id*.

    :raises: X509KeyPairNotFound if no matching row exists.
    """
    query = model_query(models.X509KeyPair)
    query = self._add_tenant_filters(context, query)
    query = query.filter_by(id=x509keypair_id)
    try:
        return query.one()
    except NoResultFound:
        raise exception.X509KeyPairNotFound(x509keypair=x509keypair_id)
def get_x509keypair_by_name(self, context, x509keypair_name):
    """Return the single tenant-visible keypair named *x509keypair_name*.

    :raises: Conflict if the name is ambiguous.
    :raises: X509KeyPairNotFound if absent.
    """
    query = model_query(models.X509KeyPair)
    query = self._add_tenant_filters(context, query)
    query = query.filter_by(name=x509keypair_name)
    try:
        return query.one()
    except MultipleResultsFound:
        raise exception.Conflict('Multiple x509keypairs exist with '
                                 'same name. Please use the x509keypair '
                                 'uuid instead.')
    except NoResultFound:
        raise exception.X509KeyPairNotFound(x509keypair=x509keypair_name)
def get_x509keypair_by_uuid(self, context, x509keypair_uuid):
    """Return the tenant-visible keypair with UUID *x509keypair_uuid*.

    :raises: X509KeyPairNotFound if no matching row exists.
    """
    query = model_query(models.X509KeyPair)
    query = self._add_tenant_filters(context, query)
    query = query.filter_by(uuid=x509keypair_uuid)
    try:
        return query.one()
    except NoResultFound:
        raise exception.X509KeyPairNotFound(x509keypair=x509keypair_uuid)
def destroy_x509keypair(self, x509keypair_id):
    """Delete the X509 keypair matching *x509keypair_id* (id or uuid).

    :raises: X509KeyPairNotFound if no single row was deleted.
    """
    session = get_session()
    with session.begin():
        query = model_query(models.X509KeyPair, session=session)
        query = add_identity_filter(query, x509keypair_id)
        count = query.delete()
        if count != 1:
            # Keyword argument (matching the other X509KeyPairNotFound
            # raises in this module) so %(x509keypair)s in the message
            # template is substituted; a positional arg would be dropped.
            raise exception.X509KeyPairNotFound(x509keypair=x509keypair_id)
def update_x509keypair(self, x509keypair_id, values):
    """Update an existing X509 keypair; the UUID itself is immutable."""
    # NOTE(dtantsur): this can lead to very strange errors
    if 'uuid' in values:
        msg = _("Cannot overwrite UUID for an existing X509KeyPair.")
        raise exception.InvalidParameterValue(err=msg)
    return self._do_update_x509keypair(x509keypair_id, values)
def _do_update_x509keypair(self, x509keypair_id, values):
    """Apply *values* to an X509 keypair row under a row lock.

    :raises: X509KeyPairNotFound if no row matches *x509keypair_id*.
    """
    session = get_session()
    with session.begin():
        query = model_query(models.X509KeyPair, session=session)
        query = add_identity_filter(query, x509keypair_id)
        try:
            # SELECT ... FOR UPDATE so concurrent updates serialize.
            ref = query.with_lockmode('update').one()
        except NoResultFound:
            raise exception.X509KeyPairNotFound(x509keypair=x509keypair_id)
        if 'provision_state' in values:
            # NOTE(review): keypairs may not actually carry a
            # provision_state; this branch looks copied from the other
            # _do_update_* helpers — confirm against the model.
            values['provision_updated_at'] = timeutils.utcnow()
        ref.update(values)
    return ref
def _add_x509keypairs_filters(self, query, filters):
if filters is None:
filters = {}
if 'bay_uuid' in filters:
query = query.filter_by(bay_uuid=filters['bay_uuid'])
if 'name' in filters:
query = query.filter_by(name=filters['name'])
if 'project_id' in filters:
query = query.filter_by(project_id=filters['project_id'])
if 'user_id' in filters:
query = query.filter_by(user_id=filters['user_id'])
return query
def get_x509keypair_list(self, context, filters=None, limit=None,
                         marker=None, sort_key=None, sort_dir=None):
    """Return a paginated, tenant-scoped list of X509 keypairs."""
    query = model_query(models.X509KeyPair)
    query = self._add_tenant_filters(context, query)
    query = self._add_x509keypairs_filters(query, filters)
    return _paginate_query(models.X509KeyPair, limit, marker,
                           sort_key, sort_dir, query)
def get_x509keypair_by_bay_uuid(self, context, bay_uuid):
    """Return the single X509 keypair belonging to bay *bay_uuid*.

    NOTE(review): no tenant filter is applied here, and a missing keypair
    is reported as BayNotFound (not X509KeyPairNotFound) — confirm both
    are intentional.
    """
    query = model_query(models.X509KeyPair).filter_by(bay_uuid=bay_uuid)
    try:
        return query.one()
    except NoResultFound:
        raise exception.BayNotFound(bay=bay_uuid)
def destroy_magnum_service(self, magnum_service_id):
    """Delete the magnum service matching *magnum_service_id*.

    :raises: MagnumServiceNotFound if no single row was deleted.
    """
    session = get_session()
    with session.begin():
        query = model_query(models.MagnumService, session=session)
        query = add_identity_filter(query, magnum_service_id)
        count = query.delete()
        if count != 1:
            # NOTE(review): the id is passed positionally while every
            # other *NotFound raise in this module uses a keyword; the
            # message template likely needs a kwarg — confirm the
            # exception's template name and pass it as a keyword.
            raise exception.MagnumServiceNotFound(magnum_service_id)
def update_magnum_service(self, magnum_service_id, values):
    """Apply *values* to a magnum service row under a row lock.

    When ``report_count`` increases, ``last_seen_up`` is refreshed so the
    liveness timestamp tracks heartbeats.

    :raises: MagnumServiceNotFound if no row matches.
    """
    session = get_session()
    with session.begin():
        query = model_query(models.MagnumService, session=session)
        query = add_identity_filter(query, magnum_service_id)
        try:
            # SELECT ... FOR UPDATE so concurrent heartbeats serialize.
            ref = query.with_lockmode('update').one()
        except NoResultFound:
            # NOTE(review): positional arg; other raises in this module
            # use keywords for message-template substitution — confirm
            # the template kwarg and pass it by name.
            raise exception.MagnumServiceNotFound(magnum_service_id)
        if 'report_count' in values:
            if values['report_count'] > ref.report_count:
                ref.last_seen_up = timeutils.utcnow()
        ref.update(values)
    return ref
def get_magnum_service_by_host_and_binary(self, context, host, binary):
    """Return the MagnumService for (*host*, *binary*), or None if absent."""
    try:
        return model_query(models.MagnumService).filter_by(
            host=host, binary=binary).one()
    except NoResultFound:
        return None
def create_magnum_service(self, values):
    """Create a new magnum service record from *values*.

    :raises: MagnumServiceAlreadyExists on a duplicate entry.
    """
    magnum_service = models.MagnumService()
    magnum_service.update(values)
    try:
        magnum_service.save()
    except db_exc.DBDuplicateEntry:
        raise exception.MagnumServiceAlreadyExists(id=magnum_service['id'])
    return magnum_service
def get_magnum_service_list(self, context, disabled=None, limit=None,
                            marker=None, sort_key=None, sort_dir=None
                            ):
    """Return a paginated list of magnum services.

    NOTE(review): the filter fires only for truthy *disabled*, so
    ``disabled=False`` lists ALL services rather than only the enabled
    ones — confirm callers expect that.
    """
    query = model_query(models.MagnumService)
    if disabled:
        query = query.filter_by(disabled=disabled)
    return _paginate_query(models.MagnumService, limit, marker,
                           sort_key, sort_dir, query)
def create_quota(self, values):
    """Create a new quota row from *values*.

    :raises: QuotaAlreadyExists when a quota for the same
        (project_id, resource) pair already exists.
    """
    quotas = models.Quota()
    quotas.update(values)
    try:
        quotas.save()
    except db_exc.DBDuplicateEntry:
        raise exception.QuotaAlreadyExists(project_id=values['project_id'],
                                           resource=values['resource'])
    return quotas
def quota_get_all_by_project_id(self, project_id):
    """Return every quota row belonging to *project_id* as a list."""
    return model_query(models.Quota).filter_by(
        project_id=project_id).all()
| |
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_equal, assert_almost_equal)
from pystruct.datasets import (generate_big_checker,
generate_blocks_multinomial, generate_blocks)
from pystruct.utils import (exhaustive_loss_augmented_inference,
make_grid_edges, find_constraint)
from pystruct.models import (LatentGridCRF, LatentDirectionalGridCRF,
LatentGraphCRF)
from pystruct.models.latent_grid_crf import kmeans_init
from pystruct.inference import get_installed
def test_k_means_initialization():
    """kmeans_init recovers labels for 1 state/label and splits them evenly
    for k states/label, on both binary and multinomial grid data."""
    n_samples = 10
    X, Y = generate_big_checker(n_samples=n_samples)
    edges = [make_grid_edges(x, return_lists=True) for x in X]
    # flatten the grid
    Y = Y.reshape(Y.shape[0], -1)
    X = X.reshape(X.shape[0], -1, X.shape[-1])
    n_labels = len(np.unique(Y))
    X = X.reshape(n_samples, -1, n_labels)
    # sanity check for one state
    H = kmeans_init(X, Y, edges, n_states_per_label=[1] * n_labels,
                    n_labels=n_labels)
    H = np.vstack(H)
    assert_array_equal(Y, H)
    # check number of states
    H = kmeans_init(X, Y, edges, n_states_per_label=[3] * n_labels,
                    n_labels=n_labels)
    H = np.vstack(H)
    assert_array_equal(np.unique(H), np.arange(6))
    # integer-dividing by 3 maps each group of latent states back to its label
    assert_array_equal(Y, H // 3)
    # for dataset with more than two states
    X, Y = generate_blocks_multinomial(n_samples=10)
    edges = [make_grid_edges(x, return_lists=True) for x in X]
    Y = Y.reshape(Y.shape[0], -1)
    X = X.reshape(X.shape[0], -1, X.shape[-1])
    n_labels = len(np.unique(Y))
    # sanity check for one state
    H = kmeans_init(X, Y, edges, n_states_per_label=[1] * n_labels,
                    n_labels=n_labels)
    H = np.vstack(H)
    assert_array_equal(Y, H)
    # check number of states
    H = kmeans_init(X, Y, edges, n_states_per_label=[2] * n_labels,
                    n_labels=n_labels)
    H = np.vstack(H)
    assert_array_equal(np.unique(H), np.arange(6))
    assert_array_equal(Y, H // 2)
def test_k_means_initialization_grid_crf():
    """Latent init is the identity for a grid CRF with 1 state per label."""
    # with only 1 state per label, nothing happens
    X, Y = generate_big_checker(n_samples=10)
    crf = LatentGridCRF(n_states_per_label=1, n_features=2, n_labels=2)
    H = crf.init_latent(X, Y)
    assert_array_equal(Y, H)
def test_k_means_initialization_graph_crf():
    """Latent init is the identity for a graph CRF with 1 state per label."""
    # with only 1 state per label, nothing happens
    X, Y = generate_big_checker(n_samples=10)
    crf = LatentGraphCRF(n_states_per_label=1, n_features=2, n_labels=2)
    # convert grid model to graph model
    X = [(x.reshape(-1, x.shape[-1]), make_grid_edges(x, return_lists=False))
         for x in X]
    H = crf.init_latent(X, Y)
    assert_array_equal(Y, H)
def test_k_means_initialization_directional_grid_crf():
    """Latent init is the identity for a directional grid CRF with one
    state per label (dead commented-out ``initialize`` call removed)."""
    X, Y = generate_big_checker(n_samples=10)
    crf = LatentDirectionalGridCRF(n_states_per_label=1, n_features=2,
                                   n_labels=2)
    H = crf.init_latent(X, Y)
    assert_array_equal(Y, H)
def test_blocks_crf_unaries():
    """With zero pairwise weights, inference is a per-node argmax of the
    unaries; latent states integer-divide back to the argmax label."""
    X, Y = generate_blocks(n_samples=1)
    x, _ = X[0], Y[0]
    unary_weights = np.repeat(np.eye(2), 2, axis=0)
    # symmetric pairwise part: lower triangle of a 4x4 matrix, all zero
    pairwise_weights = np.array([0,
                                 0, 0,
                                 0, 0, 0,
                                 0, 0, 0, 0])
    w = np.hstack([unary_weights.ravel(), pairwise_weights])
    crf = LatentGridCRF(n_states_per_label=2, n_labels=2, n_features=2)
    h_hat = crf.inference(x, w)
    assert_array_equal(h_hat // 2, np.argmax(x, axis=-1))
def test_blocks_crf():
    """Smoothing pairwise weights recover the block labels; the latent
    assignment of the ground truth has zero loss against inference."""
    X, Y = generate_blocks(n_samples=1)
    x, y = X[0], Y[0]
    # lower triangle of the symmetric 4x4 pairwise matrix: penalize
    # transitions between the two label groups
    pairwise_weights = np.array([0,
                                 0, 0,
                                 -4, -4, 0,
                                 -4, -4, 0, 0])
    unary_weights = np.repeat(np.eye(2), 2, axis=0)
    w = np.hstack([unary_weights.ravel(), pairwise_weights])
    crf = LatentGridCRF(n_states_per_label=2, n_labels=2, n_features=2)
    h_hat = crf.inference(x, w)
    assert_array_equal(y, h_hat // 2)
    h = crf.latent(x, y, w)
    assert_equal(crf.loss(h, h_hat), 0)
def test_blocks_crf_directional():
    """A directional grid CRF with symmetric weights replicated per
    direction matches the plain LatentGridCRF on inference, latent
    assignment, loss-augmented inference and joint-feature energy."""
    # test latent directional CRF on blocks
    # test that all results are the same as equivalent LatentGridCRF
    X, Y = generate_blocks(n_samples=1)
    x, y = X[0], Y[0]
    pairwise_weights = np.array([0,
                                 0, 0,
                                 -4, -4, 0,
                                 -4, -4, 0, 0])
    unary_weights = np.repeat(np.eye(2), 2, axis=0)
    w = np.hstack([unary_weights.ravel(), pairwise_weights])
    # the same symmetric matrix written out densely, once per direction
    pw_directional = np.array([0, 0, -4, -4,
                               0, 0, -4, -4,
                               -4, -4, 0, 0,
                               -4, -4, 0, 0,
                               0, 0, -4, -4,
                               0, 0, -4, -4,
                               -4, -4, 0, 0,
                               -4, -4, 0, 0])
    w_directional = np.hstack([unary_weights.ravel(), pw_directional])
    crf = LatentGridCRF(n_states_per_label=2)
    crf.initialize(X, Y)
    directional_crf = LatentDirectionalGridCRF(n_states_per_label=2)
    directional_crf.initialize(X, Y)
    h_hat = crf.inference(x, w)
    h_hat_d = directional_crf.inference(x, w_directional)
    assert_array_equal(h_hat, h_hat_d)
    h = crf.latent(x, y, w)
    h_d = directional_crf.latent(x, y, w_directional)
    assert_array_equal(h, h_d)
    h_hat = crf.loss_augmented_inference(x, y, w)
    h_hat_d = directional_crf.loss_augmented_inference(x, y, w_directional)
    assert_array_equal(h_hat, h_hat_d)
    joint_feature = crf.joint_feature(x, h_hat)
    joint_feature_d = directional_crf.joint_feature(x, h_hat)
    # energies agree even though the feature vectors differ in length
    assert_array_equal(np.dot(joint_feature, w), np.dot(joint_feature_d, w_directional))
def test_latent_consistency_zero_pw_graph():
    """Latent states map back to labels on an edgeless graph when only
    unary weights are set."""
    crf = LatentGraphCRF(n_labels=2, n_features=2, n_states_per_label=2)
    for i in range(10):
        w = np.zeros(18)
        w[:8] = np.random.normal(size=8)
        y = np.random.randint(2, size=(5))
        x = np.random.normal(size=(5, 2))
        # Builtin ``int``: the ``np.int`` alias was deprecated in NumPy
        # 1.20 and removed in 1.24; it was always just the builtin int.
        h = crf.latent((x, np.zeros((0, 2), dtype=int)), y, w)
        assert_array_equal(h // 2, y)
def test_latent_consistency_graph():
    """Latent assignment of random graphs always integer-divides back to
    the ground-truth labels."""
    crf = LatentGraphCRF(n_labels=2, n_features=2, n_states_per_label=2)
    for i in range(10):
        w = np.random.normal(size=18)
        y = np.random.randint(2, size=(4))
        x = np.random.normal(size=(4, 2))
        # Builtin ``int``: the ``np.int`` alias was deprecated in NumPy
        # 1.20 and removed in 1.24; it was always just the builtin int.
        e = np.array([[0, 1], [1, 2], [2, 0]], dtype=int)
        h = crf.latent((x, e), y, w)
        assert_array_equal(h // 2, y)
def test_loss_augmented_inference_energy_graph():
    """Returned energy equals joint-feature score plus loss for relaxed
    loss-augmented inference on a small graph."""
    crf = LatentGraphCRF(n_labels=2, n_features=2, n_states_per_label=2)
    for i in range(10):
        w = np.random.normal(size=18)
        y = np.random.randint(2, size=(3))
        x = np.random.normal(size=(3, 2))
        # Builtin ``int``: the ``np.int`` alias was deprecated in NumPy
        # 1.20 and removed in 1.24; it was always just the builtin int.
        e = np.array([[0, 1], [1, 2], [2, 0]], dtype=int)
        h_hat, energy = crf.loss_augmented_inference((x, e), y * 2, w,
                                                     relaxed=True,
                                                     return_energy=True)
        assert_almost_equal(-energy, np.dot(w, crf.joint_feature((x, e), h_hat))
                            + crf.loss(y * 2, h_hat))
def test_latent_consistency_zero_pw_grid():
    """Latent states map back to labels on grids when only unary weights
    are set (pairwise part of w left at zero)."""
    crf = LatentGridCRF(n_labels=2, n_features=2, n_states_per_label=2)
    for i in range(10):
        w = np.zeros(18)
        w[:8] = np.random.normal(size=8)
        y = np.random.randint(2, size=(5, 5))
        x = np.random.normal(size=(5, 5, 2))
        h = crf.latent(x, y, w)
        assert_array_equal(h // 2, y)
def test_latent_consistency_grid():
    """Latent assignment of random grids always integer-divides back to
    the ground-truth labels."""
    crf = LatentGridCRF(n_labels=2, n_features=2, n_states_per_label=2)
    for i in range(10):
        w = np.random.normal(size=18)
        y = np.random.randint(2, size=(4, 4))
        x = np.random.normal(size=(4, 4, 2))
        h = crf.latent(x, y, w)
        assert_array_equal(h // 2, y)
def test_loss_augmented_inference_exhaustive_grid():
    """Loss-augmented inference matches brute-force enumeration on a
    2x2 grid (small enough for exhaustive search)."""
    crf = LatentGridCRF(n_labels=2, n_features=2, n_states_per_label=2)
    for i in range(10):
        w = np.random.normal(size=18)
        y = np.random.randint(2, size=(2, 2))
        x = np.random.normal(size=(2, 2, 2))
        h_hat = crf.loss_augmented_inference(x, y * 2, w)
        h = exhaustive_loss_augmented_inference(crf, x, y * 2, w)
        assert_array_equal(h, h_hat)
def test_continuous_y():
    """Relaxed (continuous) labelings give the same joint features,
    constraints and loss as their discrete counterparts for LP/AD3."""
    for inference_method in get_installed(["lp", "ad3"]):
        X, Y = generate_blocks(n_samples=1)
        x, y = X[0], Y[0]
        w = np.array([1, 0,  # unary
                      0, 1,
                      0,     # pairwise
                      -4, 0])
        crf = LatentGridCRF(n_labels=2, n_features=2, n_states_per_label=1,
                            inference_method=inference_method)
        joint_feature = crf.joint_feature(x, y)
        # one-hot encode y so it looks like a relaxed (continuous) labeling
        y_cont = np.zeros_like(x)
        gx, gy = np.indices(x.shape[:-1])
        y_cont[gx, gy, y] = 1
        # need to generate edge marginals
        vert = np.dot(y_cont[1:, :, :].reshape(-1, 2).T,
                      y_cont[:-1, :, :].reshape(-1, 2))
        # horizontal edges
        horz = np.dot(y_cont[:, 1:, :].reshape(-1, 2).T,
                      y_cont[:, :-1, :].reshape(-1, 2))
        pw = vert + horz
        joint_feature_cont = crf.joint_feature(x, (y_cont, pw))
        assert_array_almost_equal(joint_feature, joint_feature_cont, 4)
        const = find_constraint(crf, x, y, w, relaxed=False)
        const_cont = find_constraint(crf, x, y, w, relaxed=True)
        # djoint_feature and loss are equal:
        assert_array_almost_equal(const[1], const_cont[1], 4)
        assert_almost_equal(const[2], const_cont[2], 4)
        if isinstance(const_cont[0], tuple):
            # returned y_hat is one-hot version of other
            assert_array_equal(const[0], np.argmax(const_cont[0][0], axis=-1))
            # test loss:
            assert_almost_equal(crf.loss(y, const[0]),
                                crf.continuous_loss(y, const_cont[0][0]), 4)
| |
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This test covers a resharding scenario of an already sharded keyspace.
We start with shards -80 and 80-. We then split 80- into 80-c0 and c0-.
This test is the main resharding test. It not only tests the regular resharding
workflow for a horizontal split, but also a lot of error cases and side
effects, like:
- migrating the traffic one cell at a time.
- migrating rdonly traffic back and forth.
- making sure we can't migrate the master until replica and rdonly are migrated.
- has a background thread to insert data during migration.
- tests a destination shard master failover while replication is running.
- tests a filtered replication source replacement while filtered replication
is running.
- tests 'vtctl SourceShardAdd' and 'vtctl SourceShardDelete'.
- makes sure the key range rules are properly enforced on masters.
"""
import threading
import time
import logging
import unittest
import base_sharding
import environment
import tablet
import utils
from vtproto import topodata_pb2
from vtdb import keyrange_constants
# Tablet fixtures for the resharding scenario: the keyspace starts with
# shards -80 and 80-, and 80- is split into 80-c0 and c0-.

# initial shards
# range '' - 80
shard_0_master = tablet.Tablet()
shard_0_replica = tablet.Tablet()
shard_0_ny_rdonly = tablet.Tablet(cell='ny')
# range 80 - ''
shard_1_master = tablet.Tablet()
shard_1_slave1 = tablet.Tablet()
shard_1_slave2 = tablet.Tablet()
shard_1_ny_rdonly = tablet.Tablet(cell='ny')
shard_1_rdonly1 = tablet.Tablet()
# split shards (destinations of the 80- split)
# range 80 - c0
shard_2_master = tablet.Tablet()
shard_2_replica1 = tablet.Tablet()
shard_2_replica2 = tablet.Tablet()
shard_2_rdonly1 = tablet.Tablet()
# range c0 - ''
shard_3_master = tablet.Tablet()
shard_3_replica = tablet.Tablet()
shard_3_rdonly1 = tablet.Tablet()
# convenience groupings used by the assertions below
shard_2_tablets = [shard_2_master, shard_2_replica1, shard_2_replica2,
                   shard_2_rdonly1]
shard_3_tablets = [shard_3_master, shard_3_replica, shard_3_rdonly1]
all_tablets = ([shard_0_master, shard_0_replica, shard_0_ny_rdonly,
                shard_1_master, shard_1_slave1, shard_1_slave2,
                shard_1_ny_rdonly, shard_1_rdonly1] +
               shard_2_tablets + shard_3_tablets)
def setUpModule():
    """Start the topo server, mysqld per tablet and vtctld; tear
    everything down again if any step fails."""
    try:
        environment.topo_server().setup()
        setup_procs = [t.init_mysql(use_rbr=base_sharding.use_rbr)
                       for t in all_tablets]
        utils.Vtctld().start()
        utils.wait_procs(setup_procs)
    except:
        # Best-effort cleanup before re-raising the original error.
        tearDownModule()
        raise
def tearDownModule():
    """Stop all tablets and the topo server and remove temporary state."""
    utils.required_teardown()
    if utils.options.skip_teardown:
        return
    teardown_procs = [t.teardown_mysql() for t in all_tablets]
    # raise_on_error=False: teardown is best-effort.
    utils.wait_procs(teardown_procs, raise_on_error=False)
    environment.topo_server().teardown()
    utils.kill_sub_processes()
    utils.remove_tmp_files()
    for t in all_tablets:
        t.remove_tree()
# InsertThread will insert a value into the timestamps table, and then
# every 1/5s will update its value with the current timestamp
class InsertThread(threading.Thread):
    """Background writer used to exercise filtered replication during the
    shard migration; started automatically from __init__."""

    def __init__(self, tablet_obj, thread_name, thread_id, user_id,
                 keyspace_id):
        threading.Thread.__init__(self)
        self.tablet = tablet_obj
        self.thread_name = thread_name
        self.thread_id = thread_id
        self.user_id = user_id
        self.keyspace_id = keyspace_id
        self.str_keyspace_id = utils.uint64_to_hex(keyspace_id)
        # Setting done to True stops the update loop in run().
        self.done = False
        # Seed the row before the loop starts (Python 2 'long' literal).
        self.tablet.mquery(
            'vt_test_keyspace',
            ['begin',
             'insert into timestamps(id, time_milli, custom_ksid_col) '
             'values(%d, %d, 0x%x) '
             '/* vtgate:: keyspace_id:%s */ /* user_id:%d */' %
             (self.thread_id, long(time.time() * 1000), self.keyspace_id,
              self.str_keyspace_id, self.user_id),
             'commit'],
            write=True, user='vt_app')
        self.start()

    def run(self):
        """Keep refreshing the row's timestamp until done is set."""
        try:
            while not self.done:
                self.tablet.mquery(
                    'vt_test_keyspace',
                    ['begin',
                     'update timestamps set time_milli=%d '
                     'where id=%d /* vtgate:: keyspace_id:%s */ /* user_id:%d */' %
                     (long(time.time() * 1000), self.thread_id,
                      self.str_keyspace_id, self.user_id),
                     'commit'],
                    write=True, user='vt_app')
                time.sleep(0.2)
        except Exception:  # pylint: disable=broad-except
            logging.exception('InsertThread got exception.')
# MonitorLagThread will get values from a database, and compare the timestamp
# to evaluate lag. Since the qps is really low, and we send binlogs as chunks,
# the latency is pretty high (a few seconds).
class MonitorLagThread(threading.Thread):
    """Polls the timestamps row written by InsertThread and records
    max/sum/count of the observed replication lag; started from __init__."""

    def __init__(self, tablet_obj, thread_name, thread_id):
        threading.Thread.__init__(self)
        self.tablet = tablet_obj
        self.thread_name = thread_name
        self.thread_id = thread_id
        # Setting done to True stops the polling loop in run().
        self.done = False
        # Aggregated lag statistics, read by the test after the run.
        self.max_lag_ms = 0
        self.lag_sum_ms = 0
        self.sample_count = 0
        self.start()

    def run(self):
        """Sample the lag every 5 seconds until done is set."""
        try:
            while not self.done:
                result = self.tablet.mquery(
                    'vt_test_keyspace',
                    'select time_milli from timestamps where id=%d' %
                    self.thread_id)
                if result:
                    lag_ms = long(time.time() * 1000) - long(result[0][0])
                    logging.debug('MonitorLagThread(%s) got %d ms',
                                  self.thread_name, lag_ms)
                    self.sample_count += 1
                    self.lag_sum_ms += lag_ms
                    if lag_ms > self.max_lag_ms:
                        self.max_lag_ms = lag_ms
                time.sleep(5.0)
        except Exception:  # pylint: disable=broad-except
            logging.exception('MonitorLagThread got exception.')
class TestResharding(unittest.TestCase, base_sharding.BaseShardingTest):
# create_schema will create the same schema on the keyspace
# then insert some values
def _create_schema(self):
    """Create the test tables and view in test_keyspace via ApplySchema.

    The keyspace-id column type follows base_sharding.keyspace_id_type;
    the no_pk table is only created under row-based replication.
    """
    if base_sharding.keyspace_id_type == keyrange_constants.KIT_BYTES:
        t = 'varbinary(64)'
    else:
        t = 'bigint(20) unsigned'
    # Note that the primary key columns are not defined first on purpose to test
    # that a reordered column list is correctly used everywhere in vtworker.
    create_table_template = '''create table %s(
custom_ksid_col ''' + t + ''' not null,
msg varchar(64),
id bigint not null,
parent_id bigint not null,
primary key (parent_id, id),
index by_msg (msg)
) Engine=InnoDB'''
    create_table_bindata_template = '''create table %s(
custom_ksid_col ''' + t + ''' not null,
id bigint not null,
parent_id bigint not null,
msg bit(8),
primary key (parent_id, id),
index by_msg (msg)
) Engine=InnoDB'''
    create_view_template = (
        'create view %s'
        '(parent_id, id, msg, custom_ksid_col)'
        'as select parent_id, id, msg, custom_ksid_col '
        'from %s')
    create_timestamp_table = '''create table timestamps(
id int not null,
time_milli bigint(20) unsigned not null,
custom_ksid_col ''' + t + ''' not null,
primary key (id)
) Engine=InnoDB'''
    # Make sure that clone and diff work with tables which have no primary key.
    # RBR only because Vitess requires the primary key for query rewrites if
    # it is running with statement based replication.
    create_no_pk_table = '''create table no_pk(
custom_ksid_col ''' + t + ''' not null,
msg varchar(64),
id bigint not null,
parent_id bigint not null
) Engine=InnoDB'''
    create_unrelated_table = '''create table unrelated(
name varchar(64),
primary key (name)
) Engine=InnoDB'''
    utils.run_vtctl(['ApplySchema',
                     '-sql=' + create_table_template % ('resharding1'),
                     'test_keyspace'],
                    auto_log=True)
    utils.run_vtctl(['ApplySchema',
                     '-sql=' + create_table_template % ('resharding2'),
                     'test_keyspace'],
                    auto_log=True)
    utils.run_vtctl(['ApplySchema',
                     '-sql=' + create_table_bindata_template % ('resharding3'),
                     'test_keyspace'],
                    auto_log=True)
    utils.run_vtctl(['ApplySchema',
                     '-sql=' + create_view_template % ('view1', 'resharding1'),
                     'test_keyspace'],
                    auto_log=True)
    utils.run_vtctl(['ApplySchema',
                     '-sql=' + create_timestamp_table,
                     'test_keyspace'],
                    auto_log=True)
    utils.run_vtctl(['ApplySchema',
                     '-sql=' + create_unrelated_table,
                     'test_keyspace'],
                    auto_log=True)
    if base_sharding.use_rbr:
        utils.run_vtctl(['ApplySchema', '-sql=' + create_no_pk_table,
                         'test_keyspace'], auto_log=True)
def _insert_startup_values(self):
  """Seed the source shards with a few well-known rows.

  One row lands on shard 0 (-80) and two on shard 1 (80-), for both the
  varchar table (resharding1) and the bit(8) table (resharding3). With RBR
  enabled, one extra row goes into the primary-key-less table.
  """
  # (target master, row id, varchar msg, bit(8) msg, keyspace id)
  seed_rows = [
      (shard_0_master, 1, 'msg1', 'a', 0x1000000000000000),
      (shard_1_master, 2, 'msg2', 'b', 0x9000000000000000),
      (shard_1_master, 3, 'msg3', 'c', 0xD000000000000000),
  ]
  for target, row_id, msg, bit_msg, ksid in seed_rows:
    self._insert_value(target, 'resharding1', row_id, msg, ksid)
  for target, row_id, msg, bit_msg, ksid in seed_rows:
    self._insert_value(target, 'resharding3', row_id, bit_msg, ksid)
  if base_sharding.use_rbr:
    # The no-PK table only exists when running with RBR.
    self._insert_value(shard_1_master, 'no_pk', 1, 'msg1',
                       0xA000000000000000)
  # TODO(github.com/vitessio/vitess/issues/2880): Add more rows here such
  # clone and diff would break when the insertion order on source and
  # dest shards is different.
def _check_startup_values(self):
  """Verify the seeded rows were copied to the destination shard they map to.

  Row 2 (ksid 0x90...) belongs to shard 2 (80-c0) only; row 3 (ksid 0xD0...)
  belongs to shard 3 (c0-) only. Presence is asserted on every tablet of the
  owning shard and absence on every tablet of the other shard.
  """
  # Row 2 must only exist on shard 2.
  for tablet_obj in shard_2_tablets:
    self._check_value(tablet_obj, 'resharding1', 2, 'msg2',
                      0x9000000000000000)
    self._check_value(tablet_obj, 'resharding3', 2, 'b', 0x9000000000000000)
  for tablet_obj in shard_3_tablets:
    self._check_value(tablet_obj, 'resharding1', 2, 'msg2',
                      0x9000000000000000, should_be_here=False)
    self._check_value(tablet_obj, 'resharding3', 2, 'b', 0x9000000000000000,
                      should_be_here=False)
  # Row 3 must only exist on shard 3.
  for tablet_obj in shard_2_tablets:
    self._check_value(tablet_obj, 'resharding1', 3, 'msg3',
                      0xD000000000000000, should_be_here=False)
    self._check_value(tablet_obj, 'resharding3', 3, 'c', 0xD000000000000000,
                      should_be_here=False)
  for tablet_obj in shard_3_tablets:
    self._check_value(tablet_obj, 'resharding1', 3, 'msg3',
                      0xD000000000000000)
    self._check_value(tablet_obj, 'resharding3', 3, 'c', 0xD000000000000000)
  if base_sharding.use_rbr:
    # The no-PK row (ksid 0xA0...) maps to shard 2 only.
    for tablet_obj in shard_2_tablets:
      self._check_value(tablet_obj, 'no_pk', 1, 'msg1', 0xA000000000000000)
    for tablet_obj in shard_3_tablets:
      self._check_value(tablet_obj, 'no_pk', 1, 'msg1', 0xA000000000000000,
                        should_be_here=False)
def _insert_lots(self, count, base=0):
  """Bulk-insert `count` rows per keyspace-id range into the source shard.

  Ids 10000+ carry keyspace ids in the 0xA0... range and ids 20000+ carry
  ids in the 0xE0... range, so filtered replication routes them to
  different destination shards.
  """
  for offset in xrange(count):
    row = base + offset
    self._insert_value(shard_1_master, 'resharding1', 10000 + row,
                       'msg-range1-%d' % offset, 0xA000000000000000 + row)
    self._insert_value(shard_1_master, 'resharding1', 20000 + row,
                       'msg-range2-%d' % offset, 0xE000000000000000 + row)
def _exec_multi_shard_dmls(self):
  """Issue non-annotated DMLs whose rows span multiple destination shards.

  The identical sequence of multi-row inserts, cross-shard updates and
  cross-shard deletes is replayed twice: once for the varchar table
  (resharding1) and once for the bit(8) table (resharding3); only the msg
  values differ between the two runs.
  """
  three_shard_ksids = [0x9000000000000000, 0xD000000000000000,
                       0xE000000000000000]
  two_shard_ksids = [0xD000000000000000, 0xE000000000000000]

  def run_dml_sequence(table, first_msgs, second_msgs, third_msgs,
                       two_shard_update_msg, one_shard_update_msg,
                       fourth_msgs):
    # Three-row insert spanning all three destination key ranges.
    self._insert_multi_value(shard_1_master, table,
                             [10000001, 10000002, 10000003],
                             first_msgs, three_shard_ksids)
    # Two-row insert spanning two key ranges.
    self._insert_multi_value(shard_1_master, table, [10000004, 10000005],
                             second_msgs, two_shard_ksids)
    # Three more rows; these are updated right below.
    self._insert_multi_value(shard_1_master, table,
                             [10000011, 10000012, 10000013],
                             third_msgs, three_shard_ksids)
    # This update targets two shards.
    self._exec_non_annotated_update(shard_1_master, table,
                                    [10000011, 10000012],
                                    two_shard_update_msg)
    # This update targets one shard.
    self._exec_non_annotated_update(shard_1_master, table, [10000013],
                                    one_shard_update_msg)
    # Three rows that are deleted right away.
    self._insert_multi_value(shard_1_master, table,
                             [10000014, 10000015, 10000016],
                             fourth_msgs, three_shard_ksids)
    # This delete targets two shards.
    self._exec_non_annotated_delete(shard_1_master, table,
                                    [10000014, 10000015])
    # This delete targets one shard.
    self._exec_non_annotated_delete(shard_1_master, table, [10000016])

  run_dml_sequence(
      'resharding1',
      ['msg-id10000001', 'msg-id10000002', 'msg-id10000003'],
      ['msg-id10000004', 'msg-id10000005'],
      ['msg-id10000011', 'msg-id10000012', 'msg-id10000013'],
      'update1', 'update2',
      ['msg-id10000014', 'msg-id10000015', 'msg-id10000016'])
  # Repeat the DMLs for the table whose msg column is bit(8).
  run_dml_sequence('resharding3', ['a', 'b', 'c'], ['d', 'e'],
                   ['k', 'l', 'm'], 'g', 'h', ['n', 'o', 'p'])
def _check_multi_shard_values(self):
  """Verify the rows touched by _exec_multi_shard_dmls ended up only on
  the destination shard owning their keyspace id.

  The same id/keyspace-id check sequence is run for both tables; only the
  expected msg column value differs per table.
  """
  shard_2_dbs = [shard_2_master, shard_2_replica1, shard_2_replica2]
  shard_3_dbs = [shard_3_master, shard_3_replica]
  all_dbs = shard_2_dbs + shard_3_dbs
  # (dbs to check, row id, keyspace id, expected present?)
  # NOTE(review): row 10000016 is checked with ksid 0xF0... although it was
  # inserted with 0xE0...; kept as-is since the row is deleted and the
  # absence check passes either way — confirm whether 0xE0... was intended.
  checks = [
      (shard_2_dbs, 10000001, 0x9000000000000000, True),
      (shard_2_dbs, 10000002, 0xD000000000000000, False),
      (shard_2_dbs, 10000003, 0xE000000000000000, False),
      (shard_3_dbs, 10000001, 0x9000000000000000, False),
      (shard_3_dbs, 10000002, 0xD000000000000000, True),
      (shard_3_dbs, 10000003, 0xE000000000000000, True),
      (shard_2_dbs, 10000004, 0xD000000000000000, False),
      (shard_2_dbs, 10000005, 0xE000000000000000, False),
      (shard_3_dbs, 10000004, 0xD000000000000000, True),
      (shard_3_dbs, 10000005, 0xE000000000000000, True),
      (shard_2_dbs, 10000011, 0x9000000000000000, True),
      (shard_3_dbs, 10000012, 0xD000000000000000, True),
      (shard_3_dbs, 10000013, 0xE000000000000000, True),
      (all_dbs, 10000014, 0x9000000000000000, False),
      (all_dbs, 10000015, 0xD000000000000000, False),
      (all_dbs, 10000016, 0xF000000000000000, False),
  ]
  # Expected msg column value per row id, for each table.
  resharding1_msgs = {
      10000001: 'msg-id10000001', 10000002: 'msg-id10000002',
      10000003: 'msg-id10000003', 10000004: 'msg-id10000004',
      10000005: 'msg-id10000005',
      10000011: 'update1', 10000012: 'update1', 10000013: 'update2',
      10000014: 'msg-id10000014', 10000015: 'msg-id10000015',
      10000016: 'msg-id10000016',
  }
  resharding3_msgs = {
      10000001: 'a', 10000002: 'b', 10000003: 'c',
      10000004: 'd', 10000005: 'e',
      10000011: 'g', 10000012: 'g', 10000013: 'h',
      10000014: 'n', 10000015: 'o', 10000016: 'p',
  }
  for table, msgs in [('resharding1', resharding1_msgs),
                      ('resharding3', resharding3_msgs)]:
    for dbs, row_id, ksid, present in checks:
      self._check_multi_dbs(dbs, table, row_id, msgs[row_id], ksid,
                            should_be_here=present)
def _check_multi_dbs(self, dblist, table, mid, msg, keyspace_id,
                     should_be_here=True):
  """Check the row on every tablet in dblist.

  Args:
    dblist: tablets whose databases are checked.
    table: name of the table to look in.
    mid: row id.
    msg: expected msg column value.
    keyspace_id: expected keyspace id of the row.
    should_be_here: if False, assert the row is absent instead.
  """
  for tablet_obj in dblist:
    self._check_value(tablet_obj, table, mid, msg, keyspace_id,
                      should_be_here)
def _check_lots(self, count, base=0):
  """Return what percentage of the _insert_lots rows is visible.

  The 0xA0... range is polled on shard 2's second replica and the 0xE0...
  range on shard 3's replica; every row found counts once.
  """
  found = 0
  for offset in xrange(count):
    row = base + offset
    if self._is_value_present_and_correct(
        shard_2_replica2, 'resharding1', 10000 + row,
        'msg-range1-%d' % offset, 0xA000000000000000 + row):
      found += 1
    if self._is_value_present_and_correct(
        shard_3_replica, 'resharding1', 20000 + row,
        'msg-range2-%d' % offset, 0xE000000000000000 + row):
      found += 1
  percent = found * 100 / count / 2
  logging.debug('I have %d%% of the data', percent)
  return percent
def _check_lots_timeout(self, count, threshold, timeout, base=0):
  """Poll _check_lots until at least `threshold` percent of the data shows.

  Returns the observed percentage once it reaches the threshold; between
  polls, utils.wait_step sleeps and keeps track of the remaining timeout.
  """
  percent = self._check_lots(count, base=base)
  while percent < threshold:
    timeout = utils.wait_step('waiting for %d%% of the data' % threshold,
                              timeout, sleep_time=1)
    percent = self._check_lots(count, base=base)
  return percent
def _check_lots_not_present(self, count, base=0):
  """Make sure none of the _insert_lots rows ended up on the wrong shard.

  The 0xA0... range must be absent from shard 3's replica and the 0xE0...
  range must be absent from shard 2's second replica.
  """
  for offset in xrange(count):
    row = base + offset
    self._check_value(shard_3_replica, 'resharding1', 10000 + row,
                      'msg-range1-%d' % offset, 0xA000000000000000 + row,
                      should_be_here=False)
    self._check_value(shard_2_replica2, 'resharding1', 20000 + row,
                      'msg-range2-%d' % offset, 0xE000000000000000 + row,
                      should_be_here=False)
def test_resharding(self):
# we're going to reparent and swap these two
global shard_2_master, shard_2_replica1
utils.run_vtctl(['CreateKeyspace',
'--sharding_column_name', 'bad_column',
'--sharding_column_type', 'bytes',
'test_keyspace'])
utils.run_vtctl(['SetKeyspaceShardingInfo', 'test_keyspace',
'custom_ksid_col', 'uint64'], expect_fail=True)
utils.run_vtctl(['SetKeyspaceShardingInfo', '-force',
'test_keyspace',
'custom_ksid_col', base_sharding.keyspace_id_type])
shard_0_master.init_tablet('replica', 'test_keyspace', '-80')
shard_0_replica.init_tablet('replica', 'test_keyspace', '-80')
shard_0_ny_rdonly.init_tablet('rdonly', 'test_keyspace', '-80')
shard_1_master.init_tablet('replica', 'test_keyspace', '80-')
shard_1_slave1.init_tablet('replica', 'test_keyspace', '80-')
shard_1_slave2.init_tablet('replica', 'test_keyspace', '80-')
shard_1_ny_rdonly.init_tablet('rdonly', 'test_keyspace', '80-')
shard_1_rdonly1.init_tablet('rdonly', 'test_keyspace', '80-')
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
ks = utils.run_vtctl_json(['GetSrvKeyspace', 'test_nj', 'test_keyspace'])
self.assertEqual(ks['sharding_column_name'], 'custom_ksid_col')
# we set full_mycnf_args to True as a test in the KIT_BYTES case
full_mycnf_args = (base_sharding.keyspace_id_type ==
keyrange_constants.KIT_BYTES)
# create databases so vttablet can start behaving somewhat normally
for t in [shard_0_master, shard_0_replica, shard_0_ny_rdonly,
shard_1_master, shard_1_slave1, shard_1_slave2, shard_1_ny_rdonly,
shard_1_rdonly1]:
t.create_db('vt_test_keyspace')
t.start_vttablet(wait_for_state=None, full_mycnf_args=full_mycnf_args,
binlog_use_v3_resharding_mode=False)
# wait for the tablets (replication is not setup, they won't be healthy)
for t in [shard_0_master, shard_0_replica, shard_0_ny_rdonly,
shard_1_master, shard_1_slave1, shard_1_slave2, shard_1_ny_rdonly,
shard_1_rdonly1]:
t.wait_for_vttablet_state('NOT_SERVING')
# reparent to make the tablets work
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/-80',
shard_0_master.tablet_alias], auto_log=True)
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/80-',
shard_1_master.tablet_alias], auto_log=True)
# check the shards
shards = utils.run_vtctl_json(['FindAllShardsInKeyspace', 'test_keyspace'])
self.assertIn('-80', shards, 'unexpected shards: %s' % str(shards))
self.assertIn('80-', shards, 'unexpected shards: %s' % str(shards))
self.assertEqual(len(shards), 2, 'unexpected shards: %s' % str(shards))
# create the tables
self._create_schema()
self._insert_startup_values()
# run a health check on source replicas so they respond to discovery
# (for binlog players) and on the source rdonlys (for workers)
for t in [shard_0_replica, shard_1_slave1]:
utils.run_vtctl(['RunHealthCheck', t.tablet_alias])
for t in [shard_0_ny_rdonly, shard_1_ny_rdonly, shard_1_rdonly1]:
utils.run_vtctl(['RunHealthCheck', t.tablet_alias])
# create the split shards
shard_2_master.init_tablet('replica', 'test_keyspace', '80-c0')
shard_2_replica1.init_tablet('replica', 'test_keyspace', '80-c0')
shard_2_replica2.init_tablet('replica', 'test_keyspace', '80-c0')
shard_2_rdonly1.init_tablet('rdonly', 'test_keyspace', '80-c0')
shard_3_master.init_tablet('replica', 'test_keyspace', 'c0-')
shard_3_replica.init_tablet('replica', 'test_keyspace', 'c0-')
shard_3_rdonly1.init_tablet('rdonly', 'test_keyspace', 'c0-')
# start vttablet on the split shards (no db created,
# so they're all not serving)
shard_2_master.start_vttablet(wait_for_state=None,
binlog_use_v3_resharding_mode=False)
shard_3_master.start_vttablet(wait_for_state=None,
binlog_use_v3_resharding_mode=False)
for t in [shard_2_replica1, shard_2_replica2, shard_2_rdonly1,
shard_3_replica, shard_3_rdonly1]:
t.start_vttablet(wait_for_state=None,
binlog_use_v3_resharding_mode=False)
for t in [shard_2_master, shard_2_replica1, shard_2_replica2,
shard_2_rdonly1,
shard_3_master, shard_3_replica, shard_3_rdonly1]:
t.wait_for_vttablet_state('NOT_SERVING')
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/80-c0',
shard_2_master.tablet_alias], auto_log=True)
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/c0-',
shard_3_master.tablet_alias], auto_log=True)
# check the shards
shards = utils.run_vtctl_json(['FindAllShardsInKeyspace', 'test_keyspace'])
for s in ['-80', '80-', '80-c0', 'c0-']:
self.assertIn(s, shards, 'unexpected shards: %s' % str(shards))
self.assertEqual(len(shards), 4, 'unexpected shards: %s' % str(shards))
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'],
auto_log=True)
utils.check_srv_keyspace(
'test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
# disable shard_1_slave2, so we're sure filtered replication will go
# from shard_1_slave1
utils.run_vtctl(['ChangeSlaveType', shard_1_slave2.tablet_alias, 'spare'])
shard_1_slave2.wait_for_vttablet_state('NOT_SERVING')
# we need to create the schema, and the worker will do data copying
for keyspace_shard in ('test_keyspace/80-c0', 'test_keyspace/c0-'):
utils.run_vtctl(['CopySchemaShard', '--exclude_tables', 'unrelated',
shard_1_rdonly1.tablet_alias, keyspace_shard],
auto_log=True)
# Run vtworker as daemon for the following SplitClone commands.
worker_proc, worker_port, worker_rpc_port = utils.run_vtworker_bg(
['--cell', 'test_nj', '--command_display_interval', '10ms',
'--use_v3_resharding_mode=false'],
auto_log=True)
# Copy the data from the source to the destination shards.
# --max_tps is only specified to enable the throttler and ensure that the
# code is executed. But the intent here is not to throttle the test, hence
# the rate limit is set very high.
#
# Initial clone (online).
workerclient_proc = utils.run_vtworker_client_bg(
['SplitClone',
'--offline=false',
'--exclude_tables', 'unrelated',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'--max_tps', '9999',
'test_keyspace/80-'],
worker_rpc_port)
utils.wait_procs([workerclient_proc])
self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
2, 0, 0, 0)
# Reset vtworker such that we can run the next command.
workerclient_proc = utils.run_vtworker_client_bg(['Reset'], worker_rpc_port)
utils.wait_procs([workerclient_proc])
# Test the correct handling of keyspace_id changes which happen after
# the first clone.
# Let row 2 go to shard 3 instead of shard 2.
shard_1_master.mquery('vt_test_keyspace',
'update resharding1 set'
' custom_ksid_col=0xD000000000000000 WHERE id=2',
write=True)
workerclient_proc = utils.run_vtworker_client_bg(
['SplitClone',
'--offline=false',
'--exclude_tables', 'unrelated',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'--max_tps', '9999',
'test_keyspace/80-'],
worker_rpc_port)
utils.wait_procs([workerclient_proc])
# Row 2 will be deleted from shard 2 and inserted to shard 3.
self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
1, 0, 1, 1)
self._check_value(shard_2_master, 'resharding1', 2, 'msg2',
0xD000000000000000, should_be_here=False)
self._check_value(shard_3_master, 'resharding1', 2, 'msg2',
0xD000000000000000)
# Reset vtworker such that we can run the next command.
workerclient_proc = utils.run_vtworker_client_bg(['Reset'], worker_rpc_port)
utils.wait_procs([workerclient_proc])
# Move row 2 back to shard 2 from shard 3 by changing the keyspace_id again.
shard_1_master.mquery('vt_test_keyspace',
'update resharding1 set'
' custom_ksid_col=0x9000000000000000 WHERE id=2',
write=True)
workerclient_proc = utils.run_vtworker_client_bg(
['SplitClone',
'--offline=false',
'--exclude_tables', 'unrelated',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'--max_tps', '9999',
'test_keyspace/80-'],
worker_rpc_port)
utils.wait_procs([workerclient_proc])
# Row 2 will be deleted from shard 3 and inserted to shard 2.
self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
1, 0, 1, 1)
self._check_value(shard_2_master, 'resharding1', 2, 'msg2',
0x9000000000000000)
self._check_value(shard_3_master, 'resharding1', 2, 'msg2',
0x9000000000000000, should_be_here=False)
# Reset vtworker such that we can run the next command.
workerclient_proc = utils.run_vtworker_client_bg(['Reset'], worker_rpc_port)
utils.wait_procs([workerclient_proc])
# Modify the destination shard. SplitClone will revert the changes.
# Delete row 2 (provokes an insert).
shard_2_master.mquery('vt_test_keyspace',
'delete from resharding1 where id=2', write=True)
# Update row 3 (provokes an update).
shard_3_master.mquery('vt_test_keyspace',
"update resharding1 set msg='msg-not-3' where id=3",
write=True)
# Insert row 4 and 5 (provokes a delete).
self._insert_value(shard_3_master, 'resharding1', 4, 'msg4',
0xD000000000000000)
self._insert_value(shard_3_master, 'resharding1', 5, 'msg5',
0xD000000000000000)
workerclient_proc = utils.run_vtworker_client_bg(
['SplitClone',
'--exclude_tables', 'unrelated',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'--max_tps', '9999',
'test_keyspace/80-'],
worker_rpc_port)
utils.wait_procs([workerclient_proc])
# Change tablet, which was taken offline, back to rdonly.
utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly1.tablet_alias,
'rdonly'], auto_log=True)
self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
1, 1, 2, 0)
self.verify_reconciliation_counters(worker_port, 'Offline', 'resharding1',
0, 0, 0, 2)
# Terminate worker daemon because it is no longer needed.
utils.kill_sub_process(worker_proc, soft=True)
# check the startup values are in the right place
self._check_startup_values()
# check the schema too
utils.run_vtctl(['ValidateSchemaKeyspace', '--exclude_tables=unrelated',
'test_keyspace'], auto_log=True)
# Verify vreplication table entries
result = shard_2_master.mquery('_vt', 'select * from vreplication')
self.assertEqual(len(result), 1)
self.assertEqual(result[0][1], 'SplitClone')
self.assertEqual(result[0][2],
'keyspace:"test_keyspace" shard:"80-" '
'key_range:<start:"\\200" end:"\\300" > ')
result = shard_3_master.mquery('_vt', 'select * from vreplication')
self.assertEqual(len(result), 1)
self.assertEqual(result[0][1], 'SplitClone')
self.assertEqual(result[0][2],
'keyspace:"test_keyspace" shard:"80-" key_range:<start:"\\300" > ')
# check the binlog players are running and exporting vars
self.check_destination_master(shard_2_master, ['test_keyspace/80-'])
self.check_destination_master(shard_3_master, ['test_keyspace/80-'])
# When the binlog players/filtered replication is turned on, the query
# service must be turned off on the destination masters.
# The tested behavior is a safeguard to prevent that somebody can
# accidentally modify data on the destination masters while they are not
# migrated yet and the source shards are still the source of truth.
shard_2_master.wait_for_vttablet_state('NOT_SERVING')
shard_3_master.wait_for_vttablet_state('NOT_SERVING')
# check that binlog server exported the stats vars
self.check_binlog_server_vars(shard_1_slave1, horizontal=True)
# Check that the throttler was enabled.
# The stream id is hard-coded as 1, which is the first id generated
# through auto-inc.
self.check_throttler_service(shard_2_master.rpc_endpoint(),
['BinlogPlayer/1'], 9999)
self.check_throttler_service(shard_3_master.rpc_endpoint(),
['BinlogPlayer/1'], 9999)
# testing filtered replication: insert a bunch of data on shard 1,
# check we get most of it after a few seconds, wait for binlog server
# timeout, check we get all of it.
logging.debug('Inserting lots of data on source shard')
self._insert_lots(1000)
logging.debug('Executing MultiValue Insert Queries')
self._exec_multi_shard_dmls()
logging.debug('Checking 80 percent of data is sent quickly')
v = self._check_lots_timeout(1000, 80, 5)
if v != 100:
# small optimization: only do this check if we don't have all the data
# already anyway.
logging.debug('Checking all data goes through eventually')
self._check_lots_timeout(1000, 100, 20)
logging.debug('Checking no data was sent the wrong way')
self._check_lots_not_present(1000)
logging.debug('Checking MultiValue Insert Queries')
self._check_multi_shard_values()
self.check_binlog_player_vars(shard_2_master, ['test_keyspace/80-'],
seconds_behind_master_max=30)
self.check_binlog_player_vars(shard_3_master, ['test_keyspace/80-'],
seconds_behind_master_max=30)
self.check_binlog_server_vars(shard_1_slave1, horizontal=True,
min_statements=1000, min_transactions=1000)
# use vtworker to compare the data (after health-checking the destination
# rdonly tablets so discovery works)
utils.run_vtctl(['RunHealthCheck', shard_3_rdonly1.tablet_alias])
if base_sharding.use_multi_split_diff:
logging.debug('Running vtworker MultiSplitDiff')
utils.run_vtworker(['-cell', 'test_nj',
'--use_v3_resharding_mode=false',
'MultiSplitDiff',
'--exclude_tables', 'unrelated',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/80-'],
auto_log=True)
else:
logging.debug('Running vtworker SplitDiff')
utils.run_vtworker(['-cell', 'test_nj',
'--use_v3_resharding_mode=false',
'SplitDiff',
'--exclude_tables', 'unrelated',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/c0-'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly1.tablet_alias, 'rdonly'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_3_rdonly1.tablet_alias, 'rdonly'],
auto_log=True)
utils.pause('Good time to test vtworker for diffs')
# get status for destination master tablets, make sure we have it all
if base_sharding.use_rbr:
# We submitted non-annotated DMLs, that are properly routed
# with RBR, but not with SBR. So the first shard counts
# are smaller. In the second shard, we submitted statements
# that affect more than one keyspace id. These will result
# in two queries with RBR. So the count there is higher.
self.check_running_binlog_player(shard_2_master, 4036, 2016)
self.check_running_binlog_player(shard_3_master, 4056, 2016)
else:
self.check_running_binlog_player(shard_2_master, 4044, 2016)
self.check_running_binlog_player(shard_3_master, 4048, 2016)
# start a thread to insert data into shard_1 in the background
# with current time, and monitor the delay
insert_thread_1 = InsertThread(shard_1_master, 'insert_low', 1, 10000,
0x9000000000000000)
insert_thread_2 = InsertThread(shard_1_master, 'insert_high', 2, 10001,
0xD000000000000000)
monitor_thread_1 = MonitorLagThread(shard_2_replica2, 'insert_low', 1)
monitor_thread_2 = MonitorLagThread(shard_3_replica, 'insert_high', 2)
# tests a failover switching serving to a different replica
utils.run_vtctl(['ChangeSlaveType', shard_1_slave2.tablet_alias, 'replica'])
utils.run_vtctl(['ChangeSlaveType', shard_1_slave1.tablet_alias, 'spare'])
shard_1_slave2.wait_for_vttablet_state('SERVING')
shard_1_slave1.wait_for_vttablet_state('NOT_SERVING')
utils.run_vtctl(['RunHealthCheck', shard_1_slave2.tablet_alias])
# test data goes through again
logging.debug('Inserting lots of data on source shard')
self._insert_lots(1000, base=1000)
logging.debug('Checking 80 percent of data was sent quickly')
self._check_lots_timeout(1000, 80, 5, base=1000)
self.check_binlog_server_vars(shard_1_slave2, horizontal=True,
min_statements=800, min_transactions=800)
# check we can't migrate the master just yet
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'master'],
expect_fail=True)
# check query service is off on master 2 and master 3, as filtered
# replication is enabled. Even health check that is enabled on
# master 3 should not interfere (we run it to be sure).
utils.run_vtctl(['RunHealthCheck', shard_3_master.tablet_alias],
auto_log=True)
for master in [shard_2_master, shard_3_master]:
utils.check_tablet_query_service(self, master, False, False)
stream_health = utils.run_vtctl_json(['VtTabletStreamHealth',
'-count', '1',
master.tablet_alias])
logging.debug('Got health: %s', str(stream_health))
self.assertIn('realtime_stats', stream_health)
self.assertNotIn('serving', stream_health)
# check the destination master 3 is healthy, even though its query
# service is not running (if not healthy this would exception out)
shard_3_master.get_healthz()
# now serve rdonly from the split shards, in test_nj only
utils.run_vtctl(['MigrateServedTypes', '--cells=test_nj',
'test_keyspace/80-', 'rdonly'], auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.check_srv_keyspace('test_ny', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.check_tablet_query_service(self, shard_0_ny_rdonly, True, False)
utils.check_tablet_query_service(self, shard_1_ny_rdonly, True, False)
utils.check_tablet_query_service(self, shard_1_rdonly1, False, True)
# Shouldn't be able to rebuild keyspace graph while migration is on going
# (i.e there are records that have tablet controls set)
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'],
auto_log=True,
expect_fail=True,
)
# rerun migrate to ensure it doesn't fail
# skip refresh to make it go faster
utils.run_vtctl(['MigrateServedTypes', '--cells=test_nj',
'-skip-refresh-state=true',
'test_keyspace/80-', 'rdonly'], auto_log=True)
# now serve rdonly from the split shards, everywhere
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'rdonly'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.check_srv_keyspace('test_ny', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.check_tablet_query_service(self, shard_0_ny_rdonly, True, False)
utils.check_tablet_query_service(self, shard_1_ny_rdonly, False, True)
utils.check_tablet_query_service(self, shard_1_rdonly1, False, True)
# rerun migrate to ensure it doesn't fail
# skip refresh to make it go faster
utils.run_vtctl(['MigrateServedTypes', '-skip-refresh-state=true',
'test_keyspace/80-', 'rdonly'], auto_log=True)
# then serve replica from the split shards
destination_shards = ['80-c0', 'c0-']
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'replica'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-c0 c0-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.check_tablet_query_service(self, shard_1_slave2, False, True)
# move replica back and forth
utils.run_vtctl(
['MigrateServedTypes', '-reverse', 'test_keyspace/80-', 'replica'],
auto_log=True)
# After a backwards migration, queryservice should be enabled on
# source and disabled on destinations
utils.check_tablet_query_service(self, shard_1_slave2, True, False)
# Destination tablets would have query service disabled for other
# reasons than the migration, so check the shard record instead of
# the tablets directly.
utils.check_shard_query_services(self, 'test_nj', 'test_keyspace', destination_shards,
topodata_pb2.REPLICA, False)
utils.check_shard_query_services(self, 'test_ny', 'test_keyspace', destination_shards,
topodata_pb2.REPLICA, False)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'replica'],
auto_log=True)
# After a forwards migration, queryservice should be disabled on
# source and enabled on destinations
utils.check_tablet_query_service(self, shard_1_slave2, False, True)
# Destination tablets would have query service disabled for other
# reasons than the migration, so check the shard record instead of
# the tablets directly
utils.check_shard_query_services(self, 'test_nj', 'test_keyspace', destination_shards,
topodata_pb2.REPLICA, True)
utils.check_shard_query_services(self, 'test_ny', 'test_keyspace', destination_shards,
topodata_pb2.REPLICA, True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-c0 c0-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
# reparent shard_2 to shard_2_replica1, then insert more data and
# see it flow through still
utils.run_vtctl(['PlannedReparentShard',
'-keyspace_shard', 'test_keyspace/80-c0',
'-new_master', shard_2_replica1.tablet_alias])
# update our test variables to point at the new master
shard_2_master, shard_2_replica1 = shard_2_replica1, shard_2_master
logging.debug('Inserting lots of data on source shard after reparenting')
self._insert_lots(3000, base=2000)
logging.debug('Checking 80 percent of data was sent fairly quickly')
self._check_lots_timeout(3000, 80, 10, base=2000)
# use vtworker to compare the data again
if base_sharding.use_multi_split_diff:
logging.debug('Running vtworker MultiSplitDiff')
utils.run_vtworker(['-cell', 'test_nj',
'--use_v3_resharding_mode=false',
'MultiSplitDiff',
'--exclude_tables', 'unrelated',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/80-'],
auto_log=True)
else:
logging.debug('Running vtworker SplitDiff')
utils.run_vtworker(['-cell', 'test_nj',
'--use_v3_resharding_mode=false',
'SplitDiff',
'--exclude_tables', 'unrelated',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/c0-'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly1.tablet_alias, 'rdonly'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_3_rdonly1.tablet_alias, 'rdonly'],
auto_log=True)
# going to migrate the master now, check the delays
monitor_thread_1.done = True
monitor_thread_2.done = True
insert_thread_1.done = True
insert_thread_2.done = True
logging.debug('DELAY 1: %s max_lag=%d ms avg_lag=%d ms',
monitor_thread_1.thread_name,
monitor_thread_1.max_lag_ms,
monitor_thread_1.lag_sum_ms / monitor_thread_1.sample_count)
logging.debug('DELAY 2: %s max_lag=%d ms avg_lag=%d ms',
monitor_thread_2.thread_name,
monitor_thread_2.max_lag_ms,
monitor_thread_2.lag_sum_ms / monitor_thread_2.sample_count)
# mock with the SourceShard records to test 'vtctl SourceShardDelete'
# and 'vtctl SourceShardAdd'
utils.run_vtctl(['SourceShardDelete', 'test_keyspace/c0-', '1'],
auto_log=True)
utils.run_vtctl(['SourceShardAdd', '--key_range=80-',
'test_keyspace/c0-', '1', 'test_keyspace/80-'],
auto_log=True)
# CancelResharding should fail because migration has started.
utils.run_vtctl(['CancelResharding', 'test_keyspace/80-'],
auto_log=True, expect_fail=True)
# do a Migrate that will fail waiting for replication
# which should cause the Migrate to be canceled and the source
# master to be serving again.
utils.run_vtctl(['MigrateServedTypes',
'-filtered_replication_wait_time', '0s',
'test_keyspace/80-', 'master'],
auto_log=True, expect_fail=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-c0 c0-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.check_tablet_query_service(self, shard_1_master, True, False)
# sabotage master migration and make it fail in an unfinished state.
utils.run_vtctl(['SetShardTabletControl', '-blacklisted_tables=t',
'test_keyspace/c0-', 'master'], auto_log=True)
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'master'],
auto_log=True, expect_fail=True)
# Query service is disabled in source shard as failure occurred after point of no return
utils.check_tablet_query_service(self, shard_1_master, False, True)
# Global topology records should not change as migration did not succeed
shard = utils.run_vtctl_json(['GetShard', 'test_keyspace/80-'])
self.assertEqual(shard['is_master_serving'], True, 'source shards should be set in destination shard')
shard = utils.run_vtctl_json(['GetShard', 'test_keyspace/c0-'])
self.assertEqual(len(shard['source_shards']), 1, 'source shards should be set in destination shard')
self.assertEqual(shard['is_master_serving'], False, 'source shards should be set in destination shard')
shard = utils.run_vtctl_json(['GetShard', 'test_keyspace/80-c0'])
self.assertEqual(len(shard['source_shards']), 1, 'source shards should be set in destination shard')
self.assertEqual(shard['is_master_serving'], False, 'source shards should be set in destination shard')
# remove sabotage, but make it fail early. This should not result
# in the source master serving, because this failure is past the
# point of no return.
utils.run_vtctl(['SetShardTabletControl', '-blacklisted_tables=t',
'-remove', 'test_keyspace/c0-', 'master'], auto_log=True)
utils.run_vtctl(['MigrateServedTypes',
'-filtered_replication_wait_time', '0s',
'test_keyspace/80-', 'master'],
auto_log=True, expect_fail=True)
utils.check_tablet_query_service(self, shard_1_master, False, True)
# do the migration that's expected to succeed
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'master'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-c0 c0-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-c0 c0-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.check_tablet_query_service(self, shard_1_master, False, True)
# check destination shards are serving
utils.check_tablet_query_service(self, shard_2_master, True, False)
utils.check_tablet_query_service(self, shard_3_master, True, False)
# check the binlog players are gone now
self.check_no_binlog_player(shard_2_master)
self.check_no_binlog_player(shard_3_master)
# test reverse_replication
# start with inserting a row in each destination shard
self._insert_value(shard_2_master, 'resharding2', 2, 'msg2',
0x9000000000000000)
self._insert_value(shard_3_master, 'resharding2', 3, 'msg3',
0xD000000000000000)
# ensure the rows are not present yet
self._check_value(shard_1_master, 'resharding2', 2, 'msg2',
0x9000000000000000, should_be_here=False)
self._check_value(shard_1_master, 'resharding2', 3, 'msg3',
0xD000000000000000, should_be_here=False)
# repeat the migration with reverse_replication
utils.run_vtctl(['MigrateServedTypes', '-reverse_replication=true',
'test_keyspace/80-', 'master'], auto_log=True)
# look for the rows in the original master after a short wait
time.sleep(1.0)
self._check_value(shard_1_master, 'resharding2', 2, 'msg2',
0x9000000000000000)
self._check_value(shard_1_master, 'resharding2', 3, 'msg3',
0xD000000000000000)
# retry the migration to ensure it now fails
utils.run_vtctl(['MigrateServedTypes', '-reverse_replication=true',
'test_keyspace/80-', 'master'],
auto_log=True, expect_fail=True)
# CancelResharding should now succeed
utils.run_vtctl(['CancelResharding', 'test_keyspace/80-'], auto_log=True)
self.check_no_binlog_player(shard_1_master)
# delete the original tablets in the original shard
tablet.kill_tablets([shard_1_master, shard_1_slave1, shard_1_slave2,
shard_1_ny_rdonly, shard_1_rdonly1])
for t in [shard_1_slave1, shard_1_slave2, shard_1_ny_rdonly,
shard_1_rdonly1]:
utils.run_vtctl(['DeleteTablet', t.tablet_alias], auto_log=True)
utils.run_vtctl(['DeleteTablet', '-allow_master',
shard_1_master.tablet_alias], auto_log=True)
# rebuild the serving graph, all mentions of the old shards should be gone
utils.run_vtctl(
['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
# test RemoveShardCell
utils.run_vtctl(
['RemoveShardCell', 'test_keyspace/-80', 'test_nj'], auto_log=True,
expect_fail=True)
utils.run_vtctl(
['RemoveShardCell', 'test_keyspace/80-', 'test_nj'], auto_log=True)
utils.run_vtctl(
['RemoveShardCell', 'test_keyspace/80-', 'test_ny'], auto_log=True)
shard = utils.run_vtctl_json(['GetShard', 'test_keyspace/80-'])
self.assertTrue('cells' not in shard or not shard['cells'])
# delete the original shard
utils.run_vtctl(['DeleteShard', 'test_keyspace/80-'], auto_log=True)
# make sure we can't delete the destination shard now that it's serving
_, stderr = utils.run_vtctl(['DeleteShard', 'test_keyspace/80-c0'],
expect_fail=True)
self.assertIn('is still serving, cannot delete it', stderr)
# kill everything
tablet.kill_tablets([shard_0_master, shard_0_replica, shard_0_ny_rdonly,
shard_2_master, shard_2_replica1, shard_2_replica2,
shard_2_rdonly1,
shard_3_master, shard_3_replica, shard_3_rdonly1])
if __name__ == '__main__':
  # Delegate to the shared vitess test runner (flag parsing, logging, setup).
  utils.main()
| |
"""This module is used to simulate multiple runs of Genetic Algorithm using
CellNOptR. It is used to build an average best-score vector to be compared with
other types of optimisation (e.g. Metropolis-Hastings).
For instance, to run the GA 100 times on the toy model (default value of model
and data parameters)::
g = BuildAverageGAResults(N=100, model="ToyModelMMB.sif", data="ToyModelMMB.csv")
g.run()
Default parameters of the GA are used (e.g. popsize=50) but can be changed::
g.popsize = 20
"""
import numpy
from numpy import linspace, loadtxt, array
#from cellnopt.wrapper import CNORbool
from pylab import *
#from cinapps.mcmc.core import *
#from cinapps.mcmc.diagnostics import *
__all__ = ["BuildAverageGAResults", "MultiGABool", "GABool"]
class GABool(CNORbool):
    """CNORbool specialisation that runs the genetic algorithm (stage T1 and
    optionally T2) and collects the scores/bitstrings into Results containers.

    NOTE(review): CNORbool and Results are not imported in this module (the
    cellnopt.wrapper import at the top of the file is commented out) --
    confirm they are provided by the caller before this class is used.
    """
    def __init__(self, model=None, data=None, verbose=False, popsize=10,
            maxgens=10, mode="T1", debug=True, compression=True, expansion=True):
        #CNORbool.__init__(self, model, data, debug=debug)
        #self.preprocessing(compression=compression, expansion=expansion)
        # popsize and maxgens are stored privately; the properties below keep
        # the Results containers in sync whenever they are changed.
        self._popsize = popsize
        self._maxgens = maxgens
        self.mode = mode
        self.reset()
    def reset(self):
        # One Results container per optimisation stage (T1 and T2); N is the
        # total number of score evaluations (generations times population size).
        self.results = Results(step=self.popsize, N=self.maxgens*self.popsize)
        self.resultsT2 = Results(step=self.popsize, N=self.maxgens*self.popsize)
    def _get_maxgens(self):
        return self._maxgens
    def _set_maxgens(self, N):
        # Resize the results container to match the new generation count.
        self._maxgens = N
        self.results.N = self._maxgens * self._popsize
    maxgens = property(_get_maxgens, _set_maxgens)
    def _get_popsize(self):
        return self._popsize
    def _set_popsize(self, popsize):
        self._popsize = popsize
        self.results.step = popsize # inside results
        self.results.N = self._maxgens * self._popsize
    popsize = property(_get_popsize, _set_popsize)
    def runT1(self, **kargs):
        """Run the T1 GA and copy its scores and bitstrings into self.results."""
        # maxgens-1: presumably the underlying gaBinary runs one extra
        # generation so the total equals maxgens -- TODO confirm against the
        # R implementation.
        self.gaBinaryT1(popsize=self.popsize, maxgens=self.maxgens-1, maxtime=1000000000, **kargs)
        self.results['best_scores'] = self.T1opt.results.Best_score[:]
        self.results['best_score'] = self.T1opt.results.Best_score[-1]
        self.results['best_parameters'] = list(self.T1opt.bString)[:]
        #self.results['all_best_parameters'].append(self.results['best_parameters'])
        # same as Best_score but we need to populate scores to prevent plot
        # method to fail.
        self.results['scores'] = list(self.T1opt.results.Best_score_Gen)[:]
        # Best_bit_Gen entries are comma-separated bit strings; decode each
        # into a list of ints.
        self.results['parameters'] = [[int(y) for y in x.split(',')] for x in
            self.T1opt.results.Best_bit_Gen]
    def runT2(self, **kargs):
        """Run the T2 GA and copy its scores and bitstrings into self.resultsT2."""
        try:
            self.gaBinaryT2(popsize=self.popsize, maxgens=self.maxgens-1, maxtime=1000000000, **kargs)
        except:
            # NOTE(review): this bare except swallows the failure, yet the
            # code below still dereferences self.T2opt, which may not exist if
            # gaBinaryT2 failed -- consider re-raising instead.
            print "something failed in gaBinaryT2", self.T1opt.results, self.T1opt.bString
        self.resultsT2['best_scores'] = self.T2opt.results.Best_score[:]
        self.resultsT2['best_score'] = self.T2opt.results.Best_score[-1]
        self.resultsT2['best_parameters'] = list(self.T2opt.bString)[:]
        # An empty first Best_bit_Gen entry means no bitstring was recorded;
        # use empty parameter lists in that case.
        if self.T2opt.results.Best_bit_Gen[0] == "":
            self.resultsT2['parameters'] = [[] for x in self.T2opt.results.Best_bit_Gen]
        else:
            self.resultsT2['parameters'] = [[int(y) for y in x.split(',')] for x in
                self.T2opt.results.Best_bit_Gen]
        #self.resultsT2['all_best_parameters'].append(self.resultsT2['best_parameters'])
        # same as Best_score but we need to populate scores to prevent plot
        # method to fail.
        self.resultsT2['scores'] = list(self.T2opt.results.Best_score_Gen)[:]
    def run(self, mode="T1", **kargs):
        """Run the T1 stage and, when mode=="T2", also the T2 stage."""
        self.runT1(**kargs)
        if mode=="T2":
            self.runT2(**kargs)
    def plot(self):
        """Plot the scores stored by the last run."""
        self.results.plot()
class MultiGABool(MultiOptimisation):
    """Run the boolean GA several times via the MultiOptimisation machinery."""

    def __init__(self, model, data, maxgens=20, popsize=50, Nruns=100, verbose=True):
        # Parameters forwarded to each GABool instance created per run.
        ga_kwargs = {'verbose': verbose, 'maxgens': maxgens, 'popsize': popsize}
        # Total number of computeScore calls performed by one run.
        calls_per_run = maxgens * popsize
        super(MultiGABool, self).__init__(model, data, calls_per_run, Nruns,
                                          optim_func=GABool,
                                          optim_params=ga_kwargs)

    def run(self, mode="T1", **kargs):
        """Delegate to the parent run, defaulting to the T1 stage."""
        super(MultiGABool, self).run(mode=mode, **kargs)
class BuildAverageGAResults(object):
    """Run the boolean GA N times and average the best-score curves."""
    def __init__(self, model="ToyModelMMB.sif", data="ToyModelMMB.csv", N=10,
        popsize=50, maxgens=20, pmutation=0.5, verbose=False):
        """Build an average best score curve over N simulations.

        :param model: SIF filename of the model (resolved via cnodata)
        :param data: CSV/MIDAS filename of the data (resolved via cnodata)
        :param N: number of independent GA runs to average over
        :param popsize: GA population size (must be > 2)
        :param maxgens: number of GA generations (must be > 1)
        :param pmutation: mutation probability, in [0, 1]

        To create an instance and run the GA binary 100 times with the default
        GA parameters, type::

            from tools import *
            g = BuildAverageGAResults(N=100, model="ToyModelMMB.sif", data="ToyModelMMB.csv")
            g.run()
            g.plot()

        Results can be saved::

            g.savedata(filename="test.dat")

        And retrieved later on as follows::

            g = BuildAverageGAResults(N=100)
            g.loaddata(filename="test.dat")
            g.plot()
            g.ydata # contains the average best_scores over number of iteration.

        .. note:: The number of iterations is simply popsize times maxgens.
            There is one value for each generation.

        Results can be plotted using the R object::

            g.b.plotModel()
            g.b.plotFit()
        """
        assert popsize>2
        assert maxgens>1
        assert pmutation >=0 and pmutation<=1
        self.popsize = popsize
        self.elitism = 5 # default gabinary not to be changed.
        self.pmutation = pmutation
        # elitism must stay below popsize; otherwise fall back to half the
        # population.
        if self.popsize<= self.elitism:
            self.elitism = self.popsize/2
        self.maxgens = maxgens
        # keep stallgenmax at least maxgens (and at least 100) so the GA does
        # not stop early on stalled generations.
        if self.maxgens > 100:
            self.stallgenmax = maxgens
        else:
            self.stallgenmax = 100
        self.N = N
        from cellnopt.data import cnodata
        from cellnopt.wrapper import CNORbool
        self.b = CNORbool(cnodata(model), cnodata(data), verbose=verbose)
        self.data = data
        self.model = model
        self.verbose = verbose
        print "Initialisation done. call run() method and plot() to see the results"
        self.allresults = []        # best-score vector of each run
        self.best_bitstrings = []   # best bitstring of each run
        self.best_scores = []       # best (minimum) score of each run
        self._computed = False
        # x axis: cumulative number of computeScore calls at each generation.
        self.xdata = [x*self.popsize for x in range(1,self.maxgens+1)]
    def runT1(self, maxtime=100):
        """Alias for :meth:`run` (T1 stage only)."""
        self.run(maxtime=maxtime)
    def run(self, maxtime=100):
        """Run the Genetic Algorithm several times, each run producing a
        vector containing the scores at each generation.
        """
        # NOTE(review): this warning does not return; the run proceeds and
        # appends to the existing result lists -- confirm intended.
        if self._computed == True:
            print "Data already computed, set computed to False to force the run()"
        error = 0
        for i in range(0, self.N):
            print "Running simulation %s" % i
            # 19 is required instead of 20 because simu stops after the 19th running
            # an extra one anyway making the final value to be 20.
            self.b.run(stallgenmax=self.stallgenmax,
                popsize=self.popsize,maxtime=maxtime, pmutation=self.pmutation,
                maxgens=self.maxgens-1, elitism=self.elitism, verbose=False, show=False, writeall=False)
            # keep only the first 20 generation (popsize=50) that makes 1000
            # iteration in total.
            res = self.b.T1opt.results.Best_score
            # TODO: adapt code to get best bitstring after each run
            self.current_best_bitstring = self.b.T1opt.results.Best_bitString[-1][:]
            self.current_best_bitstring= [int(x) for x in self.current_best_bitstring.split(",")]
            if i == 0:
                average = numpy.array(res)
            else:
                try:
                    average += numpy.array(res)
                except:
                    print "Simulation returns a vector of different length. Maybe maxtime is not large enough"
                    # if arrays have different size, += above will not work.
                    # Just increment error to be used later when computing the
                    # average. Could happen if maxtime is too short.
                    error+=1
            self.best_scores.append(min(res))
            self.allresults.append(res)
            self.best_bitstrings.append(self.current_best_bitstring)
        # Average over the runs that actually contributed (N minus failures).
        self.ydata = average/float(self.N-error)
        self._computed = True
        if len(self.ydata) != len(self.xdata):
            raise ValueError("ydata length does not match xdata length. You may change xdata accordingly or rerun with a larger maxtime parameter.")
    def runT2(self):
        # NOTE(review): placeholder -- only asserts that run() was called.
        assert self._computed, "must call run() first"
    def plot(self):
        """Plot the averaged scores against the number of computeScore calls."""
        from pylab import plot
        plot(self.xdata, self.ydata)
        xlabel("Number of computeScore calls")
        ylabel("Score")
        ylim([0, ylim()[1]])
    def hist(self, nbins=20):
        """Create histogram of the best scores found over all runs."""
        from pylab import hist
        best_scores = [min(x) for x in self.allresults]
        res = hist(best_scores, nbins)
        print "Found %s unique solution:" % len(set(best_scores))
        print set(best_scores)
        return res
    def savedata(self, filename1="BuildAverageGAResults", filename2="BuildAverageGABitstrings",filename3="BuildAverageGABestScores"):
        """Save the main results (best score, and the averaged bitstring) into a pickle"""
        import pickle
        # b is the CNORbool object that may change in the future so saving it
        # into a pickle is unstable. Let us remove it before saving the pickle
        # and get it back after the pickle is done
        b = self.b
        del self.b
        pickle.dump(self, open(filename1, "w"))
        print "object saved in " + filename1
        self.b = b
        #store best_bitstrings
        pickle.dump(numpy.array(self.best_bitstrings), open(filename2, "w"))
        print "object saved in " + filename2
        #store best_scores
        pickle.dump(numpy.array(self.best_scores), open(filename3, "w"))
        print "object saved in " + filename3
    def loaddata(self, filename):
        """Load a data set created with the savedata method (pickle)."""
        import pickle
        res = pickle.load(open(filename))
        #bitstrings = pickle.load(open(filename2))
        self.allresults = res.allresults[:]
        self.xdata = res.xdata[:]
        self.ydata = res.ydata[:]
        self.N = res.N
        self.popsize = res.popsize
        self.maxgens = res.maxgens
        # NOTE(review): this sets self.computed, not the self._computed flag
        # that run() checks -- confirm this is intentional.
        self.computed = True
        data = res.data
        model = res.model
        verbose = res.verbose
        self.verbose = res.verbose
        from cellnopt.data import cnodata
        self.b = CNORbool(cnodata(model), cnodata(data), verbose=verbose)
        del res
    def loadbitstrings(self,filename):
        """Load a bitstrings set created with the savedata method (pickle)."""
        import pickle
        bitstrings = pickle.load(open(filename))
        return bitstrings
    def loadbestscores(self,filename):
        """Load the best scores saved by the savedata method (pickle)."""
        import pickle
        best_scores = pickle.load(open(filename))
        return best_scores
"""
def get_gabinary_toy("BuildAverageGAResults_toy_20_times_50.dat"):
g = BuildAverageGAResults(N=100)
g.loaddata(filename=filename)
return g.ydata
def get_gabinary_extliver("BuildAverageGAResults_extliver_20_times_50.dat"):
g = BuildAverageGAResults(N=100)
g.loaddata(filename=filename)
return g.ydata
"""
| |
# coding=utf-8
import argparse
import base64
import io
import os
import shutil
import sys
from contextlib import redirect_stdout
"""
This file handles the commandline interface
The process() function can also be called directly; all of its arguments are file handles.
"""
def main():
    """Command line interface for flopymetascript.

    Receives filenames, stdin, or stdout from the command line, sanitizes
    them into file handles and calls :func:`process` with those handles.

    :return: None
    """
    # User-facing help text. (Fixed typos: "utput.zip" -> "output.zip".)
    usage = """
## With zipfiles
Try this first,

```bash
$ flopymetascript --outbytesfile output.zip --inbytesfile input.zip --logfile log.txt
```

input.zip is a zip-file that contains MODFLOW input files and a single .nam file. Its content is processed and
written to output.zip. Some logging is written to log.txt. The `$`-sign should be omitted, and only refers to that the
command is to be entered in the bash-commandline.

## Using pipes

```bash
$ openssl base64 -in input.zip -out input.zip.b64
$ flopymetascript --outbytesfile output.zip --inbase64file input.zip.b64
```

input.zip is encoded to base64 and is used as input file for flopymetascript

```bash
$ flopymetascript --outbytesfile output.zip --inbase64file - < input.zip.b64
```

The content of input.zip.b64 is streamed/piped to flopymetascript

```bash
$ openssl base64 -in input.zip | flopymetascript --outbytesfile output.zip --inbase64file -
```

The same as what is done previously, however input.zip is encoded and instead of writing it to a file, it is passed
as stdin to the inbase64file argument of flopymetascript.

```bash
$ openssl base64 -in input.zip | flopymetascript --outbase64file output.zip --inbase64file - --logfile -
```

The log file is printed to stdout.

You cannot send both outbase64file and logfile to stdout. They will be mixed and the resulting output file is not
readable.
"""
    description = """
Converts a zip with MODFLOW input files to a zip containing Flopy script
"""
    # (Fixed typos: "I you" -> "If you", "convidense" -> "confidence".)
    epilog = """
No money is to be made with this service. If you find it useful, please donate to charity (be creative in choosing
which one) and send me a note. Thanks! The author is not affiliated with the modflow family nor Flopy. This
converter/generator uses the Flopy load function. Any errors/mistakes in the Flopy load functions propagate to the
generated script. The author has absolutely no confidence that this script is correct and is not responsible for
the content and consequences of malicious scripts.
"""

    class MyParser(argparse.ArgumentParser):
        # Print the full help text (not just the usage line) on argument
        # errors before exiting with status 2.
        def error(self, message):
            sys.stderr.write('error: %s\n' % message)
            self.print_help()
            sys.exit(2)

    class InputAction(argparse.Action):
        # Reject '-' (stdin/stdout) for the raw-bytes arguments: binary
        # streams must come from real files.
        def __call__(self, parser, namespace, values, option_string=None):
            if values.name == "<stdin>" or values.name == "<stdout>":
                raise argparse.ArgumentTypeError(
                    "Cannot use stdin for inbytesfile or stdout for outbytesfile"
                )

            setattr(namespace, self.dest, values)

    parser = MyParser(
        prog='flopymetascript',
        usage=usage,
        description=description,
        epilog=epilog)
    parser.add_argument(
        '--version', action='version', version='%(prog)s 0.1.0')

    # base64 and bytes variants of the same channel are mutually exclusive.
    inputs = parser.add_mutually_exclusive_group()
    output = parser.add_mutually_exclusive_group()

    inputs.add_argument(
        '--inbase64file',
        type=argparse.FileType('r'),
        required=False,
        help='Filename or - for stdin of the input zipfile')
    output.add_argument(
        '--outbase64file',
        type=argparse.FileType('w'),
        required=False,
        help='Filename or - for stdout of the output zipfile')
    inputs.add_argument(
        '--inbytesfile',
        type=argparse.FileType('rb'),
        required=False,
        action=InputAction,
        help='Filename of the input zipfile')
    output.add_argument(
        '--outbytesfile',
        type=argparse.FileType('wb'),
        required=False,
        action=InputAction,
        help='Filename of the output zipfile')
    parser.add_argument(
        '--logfile',
        type=argparse.FileType('w'),
        required=False,
        help='Filename or - for stdout of the logfile')

    # Access the arguments as a dictionary
    kwargs = vars(parser.parse_args())

    # Pass all the arguments to the process function
    process(**kwargs)
def process(inbase64file=None,
            outbase64file=None,
            inbytesfile=None,
            outbytesfile=None,
            logfile=None):
    """
    All arguments are filehandles. Assumes sane filehandles, no checking for incompatible stdin stdout combinations.

    :param logfile: File handle with a write utf8 attribute of the logfile
    :param inbase64file: File handle with a read utf8 attribute of the input zipfile and encoded with base64
    :param outbase64file: File handle with a write utf8 attribute of the output zipfile and is encoded with base64
    :param inbytesfile: File handle with a read bytes attribute of the input zipfile
    :param outbytesfile: File handle with a write bytes attribute of the output zipfile
    :return:
    """
    # Everything printed inside the redirect block is captured here so it can
    # optionally be copied to the logfile afterwards.
    stdout_buf = io.StringIO()
    with redirect_stdout(stdout_buf):
        if logfile:
            print('\nRedirected the stdout to a temporary buffer\n')
        if logfile:
            print('\nAbout to import metafunctions.run\n')
        # Because flopy writes stuff to stdout while importing
        from .metafunctions import run, eval_input
        if inbytesfile:
            print('\ninbytes file handle\n')
            inbytes = inbytesfile
        elif inbase64file:
            print('\ninbase64 file handle\n')
            # Decode the base64 payload (file handle or raw string) into an
            # in-memory bytes buffer.
            inbytes = io.BytesIO()
            if hasattr(inbase64file, 'read'):
                inbytes.write(base64.b64decode(inbase64file.read()))
            else:
                inbytes.write(base64.b64decode(inbase64file))
            inbytes.seek(0)
        else:
            print('\nNo input files are given. I am about to throw an error\n')
            # NOTE(review): os.error is an alias of OSError; calling it only
            # constructs an exception instance and does NOT raise it, so
            # execution simply continues. Confirm whether `raise` was intended
            # here (raising would change the no-arguments CLI behavior).
            os.error('No input files are given')

        # To prevent error messages when run without arguments
        if inbytesfile or inbase64file:
            bytesZip = run(inbytes)
            bytesZip.seek(0)

            # evaluate the input files and write report to log
            eval_input(inbytes)

    if logfile:
        stdout_buf.seek(0)
        shutil.copyfileobj(stdout_buf, logfile)

    # write output
    if outbytesfile:
        shutil.copyfileobj(bytesZip, outbytesfile)
    elif outbase64file:
        outbase64file.write(base64.urlsafe_b64encode(bytesZip.read()).decode())
        # outb64 = base64.b64encode(bytesZip.read())
        # outbase64file.write(outb64.decode('ascii'))
    else:
        print('Im not doing anything')
    return
| |
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_custom_property_definition_info
except ImportError:
bt_custom_property_definition_info = sys.modules[
"onshape_client.oas.models.bt_custom_property_definition_info"
]
try:
from onshape_client.oas.models import bt_part_appearance_info
except ImportError:
bt_part_appearance_info = sys.modules[
"onshape_client.oas.models.bt_part_appearance_info"
]
try:
from onshape_client.oas.models import bt_part_material_info
except ImportError:
bt_part_material_info = sys.modules[
"onshape_client.oas.models.bt_part_material_info"
]
try:
from onshape_client.oas.models import bt_thumbnail_info
except ImportError:
bt_thumbnail_info = sys.modules["onshape_client.oas.models.bt_thumbnail_info"]
class BTPartMetadataInfo(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # Allowed enum values for the release-management `state` attribute.
    allowed_values = {
        ("state",): {
            "IN_PROGRESS": "IN_PROGRESS",
            "PENDING": "PENDING",
            "RELEASED": "RELEASED",
            "OBSOLETE": "OBSOLETE",
            "REJECTED": "REJECTED",
            "DISCARDED": "DISCARDED",
        },
    }

    validations = {}

    # No extra keys beyond attribute_map are accepted.
    additional_properties_type = None

    @staticmethod
    def openapi_types():
        """
        This must be a class method so a model may have properties that are
        of type self, this ensures that we don't create a cyclic import

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            "appearance": (bt_part_appearance_info.BTPartAppearanceInfo,),  # noqa: E501
            "body_type": (str,),  # noqa: E501
            "configuration_id": (str,),  # noqa: E501
            "custom_properties": ({str: (str,)},),  # noqa: E501
            "custom_property_definitions": (
                {
                    str: (
                        bt_custom_property_definition_info.BTCustomPropertyDefinitionInfo,
                    )
                },
            ),  # noqa: E501
            "description": (str,),  # noqa: E501
            "element_id": (str,),  # noqa: E501
            "href": (str,),  # noqa: E501
            "id": (str,),  # noqa: E501
            "is_flattened_body": (bool,),  # noqa: E501
            "is_hidden": (bool,),  # noqa: E501
            "is_mesh": (bool,),  # noqa: E501
            "material": (bt_part_material_info.BTPartMaterialInfo,),  # noqa: E501
            "microversion_id": (str,),  # noqa: E501
            "name": (str,),  # noqa: E501
            "ordinal": (int,),  # noqa: E501
            "part_id": (str,),  # noqa: E501
            "part_number": (str,),  # noqa: E501
            "part_query": (str,),  # noqa: E501
            "product_line": (str,),  # noqa: E501
            "project": (str,),  # noqa: E501
            "property_source_types": ({str: (int,)},),  # noqa: E501
            "referencing_configured_part_node_ids": ([str],),  # noqa: E501
            "revision": (str,),  # noqa: E501
            "state": (str,),  # noqa: E501
            "thumbnail_configuration_id": (str,),  # noqa: E501
            "thumbnail_info": (bt_thumbnail_info.BTThumbnailInfo,),  # noqa: E501
            "title1": (str,),  # noqa: E501
            "title2": (str,),  # noqa: E501
            "title3": (str,),  # noqa: E501
            "unflattened_part_id": (str,),  # noqa: E501
            "vendor": (str,),  # noqa: E501
        }

    @staticmethod
    def discriminator():
        # This model has no discriminator field.
        return None

    # Maps python attribute names to their JSON (camelCase) keys.
    attribute_map = {
        "appearance": "appearance",  # noqa: E501
        "body_type": "bodyType",  # noqa: E501
        "configuration_id": "configurationId",  # noqa: E501
        "custom_properties": "customProperties",  # noqa: E501
        "custom_property_definitions": "customPropertyDefinitions",  # noqa: E501
        "description": "description",  # noqa: E501
        "element_id": "elementId",  # noqa: E501
        "href": "href",  # noqa: E501
        "id": "id",  # noqa: E501
        "is_flattened_body": "isFlattenedBody",  # noqa: E501
        "is_hidden": "isHidden",  # noqa: E501
        "is_mesh": "isMesh",  # noqa: E501
        "material": "material",  # noqa: E501
        "microversion_id": "microversionId",  # noqa: E501
        "name": "name",  # noqa: E501
        "ordinal": "ordinal",  # noqa: E501
        "part_id": "partId",  # noqa: E501
        "part_number": "partNumber",  # noqa: E501
        "part_query": "partQuery",  # noqa: E501
        "product_line": "productLine",  # noqa: E501
        "project": "project",  # noqa: E501
        "property_source_types": "propertySourceTypes",  # noqa: E501
        "referencing_configured_part_node_ids": "referencingConfiguredPartNodeIds",  # noqa: E501
        "revision": "revision",  # noqa: E501
        "state": "state",  # noqa: E501
        "thumbnail_configuration_id": "thumbnailConfigurationId",  # noqa: E501
        "thumbnail_info": "thumbnailInfo",  # noqa: E501
        "title1": "title1",  # noqa: E501
        "title2": "title2",  # noqa: E501
        "title3": "title3",  # noqa: E501
        "unflattened_part_id": "unflattenedPartId",  # noqa: E501
        "vendor": "vendor",  # noqa: E501
    }

    @staticmethod
    def _composed_schemas():
        # This model is not composed (no allOf/oneOf/anyOf).
        return None

    # Internal bookkeeping attributes that must never be treated as model
    # properties.
    required_properties = set(
        [
            "_data_store",
            "_check_type",
            "_from_server",
            "_path_to_item",
            "_configuration",
        ]
    )

    def __init__(
        self,
        _check_type=True,
        _from_server=False,
        _path_to_item=(),
        _configuration=None,
        **kwargs
    ):  # noqa: E501
        """bt_part_metadata_info.BTPartMetadataInfo - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _from_server (bool): True if the data is from the server
                                False if the data is from the client (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            appearance (bt_part_appearance_info.BTPartAppearanceInfo): [optional] # noqa: E501
            body_type (str): [optional] # noqa: E501
            configuration_id (str): [optional] # noqa: E501
            custom_properties ({str: (str,)}): [optional] # noqa: E501
            custom_property_definitions ({str: (bt_custom_property_definition_info.BTCustomPropertyDefinitionInfo,)}): [optional] # noqa: E501
            description (str): [optional] # noqa: E501
            element_id (str): [optional] # noqa: E501
            href (str): [optional] # noqa: E501
            id (str): [optional] # noqa: E501
            is_flattened_body (bool): [optional] # noqa: E501
            is_hidden (bool): [optional] # noqa: E501
            is_mesh (bool): [optional] # noqa: E501
            material (bt_part_material_info.BTPartMaterialInfo): [optional] # noqa: E501
            microversion_id (str): [optional] # noqa: E501
            name (str): [optional] # noqa: E501
            ordinal (int): [optional] # noqa: E501
            part_id (str): [optional] # noqa: E501
            part_number (str): [optional] # noqa: E501
            part_query (str): [optional] # noqa: E501
            product_line (str): [optional] # noqa: E501
            project (str): [optional] # noqa: E501
            property_source_types ({str: (int,)}): [optional] # noqa: E501
            referencing_configured_part_node_ids ([str]): [optional] # noqa: E501
            revision (str): [optional] # noqa: E501
            state (str): [optional] # noqa: E501
            thumbnail_configuration_id (str): [optional] # noqa: E501
            thumbnail_info (bt_thumbnail_info.BTThumbnailInfo): [optional] # noqa: E501
            title1 (str): [optional] # noqa: E501
            title2 (str): [optional] # noqa: E501
            title3 (str): [optional] # noqa: E501
            unflattened_part_id (str): [optional] # noqa: E501
            vendor (str): [optional] # noqa: E501
        """
        self._data_store = {}
        self._check_type = _check_type
        self._from_server = _from_server
        self._path_to_item = _path_to_item
        self._configuration = _configuration

        for var_name, var_value in six.iteritems(kwargs):
            if (
                var_name not in self.attribute_map
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and self.additional_properties_type is None
            ):
                # discard variable.
                continue
            setattr(self, var_name, var_value)
| |
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for plot."""
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
import jax.numpy as jnp
import numpy as np
from lightweight_mmm import lightweight_mmm
from lightweight_mmm import plot
from lightweight_mmm import preprocessing
class PlotTest(parameterized.TestCase):
  """Tests for lightweight_mmm.plot.

  setUp patches the matplotlib/seaborn drawing primitives with mocks, so each
  test can fit a tiny model (5 warmup / 5 samples / 1 chain) and then assert
  on the recorded plotting calls instead of rendering real figures.
  """

  def setUp(self):
    super().setUp()
    # Patch the three drawing entry points used by plot.py; the mocks record
    # every call (args and kwargs) for inspection by the tests below.
    self.mock_ax_scatter = self.enter_context(
        mock.patch.object(plot.plt.Axes, "scatter", autospec=True))
    self.mock_sns_lineplot = self.enter_context(
        mock.patch.object(plot.sns, "lineplot", autospec=True))
    self.mock_plt_plot = self.enter_context(
        mock.patch.object(plot.plt.Axes, "plot", autospec=True))

  def test_plot_model_fit_plot_called_with_scaler(self):
    # plot_model_fit should draw via Axes.plot when given the target_scaler
    # that was used to scale the fitting target.
    target_scaler = preprocessing.CustomScaler(divide_operation=jnp.mean)
    target = target_scaler.fit_transform(jnp.ones(50))
    mmm = lightweight_mmm.LightweightMMM()
    mmm.fit(
        media=jnp.ones((50, 3)),
        target=target,
        total_costs=jnp.repeat(50, 3),
        number_warmup=5,
        number_samples=5,
        number_chains=1)
    plot.plot_model_fit(media_mix_model=mmm, target_scaler=target_scaler)
    self.assertTrue(self.mock_plt_plot.called)

  def test_plot_model_fit_plot_called_without_scaler(self):
    # Same as above but without a target scaler: Axes.plot must still be used.
    mmm = lightweight_mmm.LightweightMMM()
    mmm.fit(
        media=jnp.ones((50, 3)),
        target=jnp.ones(50),
        total_costs=jnp.repeat(50, 3),
        number_warmup=5,
        number_samples=5,
        number_chains=1)
    plot.plot_model_fit(media_mix_model=mmm)
    self.assertTrue(self.mock_plt_plot.called)

  def test_plot_response_curves_plots_n_times_with_correct_params(
      self):
    # One seaborn lineplot per channel per chart (2 charts) is expected, and
    # the x values extend to 1.2 — presumably 20% beyond the all-ones media
    # max; confirm against plot.plot_response_curves if that changes.
    n_channels = 5
    mmm = lightweight_mmm.LightweightMMM()
    mmm.fit(
        media=jnp.ones((50, n_channels)),
        target=jnp.ones(50),
        total_costs=jnp.repeat(50, n_channels),
        number_warmup=5,
        number_samples=5,
        number_chains=1)
    plot.plot_response_curves(media_mix_model=mmm)
    _, call_kwargs = self.mock_sns_lineplot.call_args_list[0]
    # n channels times 2 charts.
    self.assertEqual(self.mock_sns_lineplot.call_count,
                     2 * mmm.n_media_channels)
    self.assertEqual(call_kwargs["x"].max(), 1.2)

  def test_plot_response_curves_with_prices_plots_n_times_with_correct_params(
      self):
    # With per-channel prices the x maxima scale by each price (1.2 * price);
    # each expected max appears twice because there are 2 charts per channel.
    n_channels = 5
    prices = jnp.array([1., 0.8, 2., 3., 1.])
    expected_maxes = jnp.repeat(jnp.array([1.2, 0.96, 2.4, 3.6, 1.2]), 2)
    mmm = lightweight_mmm.LightweightMMM()
    mmm.fit(
        media=jnp.ones((50, n_channels)),
        target=jnp.ones(50),
        total_costs=jnp.repeat(50, n_channels),
        number_warmup=5,
        number_samples=5,
        number_chains=1)
    plot.plot_response_curves(media_mix_model=mmm, prices=prices)
    calls_list = self.mock_sns_lineplot.call_args_list
    self.assertEqual(self.mock_sns_lineplot.call_count, n_channels * 2)
    for (_, call_kwargs), expected_max in zip(calls_list, expected_maxes):
      self.assertAlmostEqual(call_kwargs["x"].max().item(),
                             expected_max,
                             places=4)

  def test_plot_response_curves_produces_y_axis_starting_at_zero(self):
    # The first chart per channel (first 3 lineplot calls here) must have its
    # y data starting at exactly zero.
    mmm = lightweight_mmm.LightweightMMM()
    mmm.fit(
        media=jnp.ones((50, 3)),
        target=jnp.ones(50),
        total_costs=jnp.repeat(50, 3),
        number_warmup=5,
        number_samples=5,
        number_chains=1)
    plot.plot_response_curves(media_mix_model=mmm)
    calls_list = self.mock_sns_lineplot.call_args_list
    for _, call_kwargs in calls_list[:3]:
      self.assertEqual(call_kwargs["y"].min().item(), 0)

  def test_plot_response_curves_scales_with_media_scaler(self):
    # Passing a media scaler with divide factor `factor` should multiply the
    # plotted x maxima by that factor (asserted on the Axes.plot calls).
    media_scaler = preprocessing.CustomScaler(divide_operation=jnp.mean)
    factor = 5
    media_scaler.fit(jnp.ones(5) * factor)
    expected_maxes = jnp.repeat(
        jnp.repeat(jnp.array([1.2]), repeats=5),
        repeats=2)
    mmm = lightweight_mmm.LightweightMMM()
    mmm.fit(
        media=jnp.ones((50, 5)),
        target=jnp.ones(50),
        total_costs=jnp.repeat(50, 5),
        number_warmup=5,
        number_samples=5,
        number_chains=1)
    plot.plot_response_curves(media_mix_model=mmm,
                              media_scaler=media_scaler)
    calls_list = self.mock_plt_plot.call_args_list
    for (_, call_kwargs), expected_max in zip(calls_list, expected_maxes):
      self.assertAlmostEqual(call_kwargs["x"].max().item(),
                             expected_max * factor,
                             places=4)

  def test_plot_response_curves_scales_with_target_scaler(self):
    # Passing a target scaler with divide factor `factor` should multiply the
    # plotted y maxima by that factor.
    target_scaler = preprocessing.CustomScaler(divide_operation=jnp.mean)
    factor = 5
    target_scaler.fit(jnp.ones(50) * factor)
    mmm = lightweight_mmm.LightweightMMM()
    mmm.fit(
        media=jnp.ones((50, 5)),
        target=jnp.ones(50),
        total_costs=jnp.repeat(50, 5),
        number_warmup=5,
        number_samples=5,
        number_chains=1)
    plot.plot_response_curves(media_mix_model=mmm,
                              target_scaler=target_scaler)
    calls_list = self.mock_plt_plot.call_args_list
    for _, call_kwargs in calls_list:
      self.assertAlmostEqual(call_kwargs["y"].max().item(),
                             1 * factor,
                             places=4)

  def test_perfect_correlation_returns_correct_output(self):
    # y is x shifted by a constant, so the best lag is 0 with correlation 1.
    x = jnp.arange(100)
    y = jnp.arange(100, 200)
    idx, maxcorr = plot.plot_cross_correlate(x, y)
    self.assertEqual(idx, 0)
    self.assertEqual(maxcorr, 1)

  def test_var_cost_plot_called_with_correct_kwargs(self):
    # plot_var_cost should scatter cost on x and the coefficient of variation
    # (std/mean per channel) on y.
    media = jnp.arange(10).reshape((5, 2))
    costs = [1, 2]
    names = ["a", "b"]
    std = jnp.repeat(2.82842712, 2)
    means = jnp.array([4, 5])
    expected_coef_of_variation = std / means
    _ = plot.plot_var_cost(media, costs, names)
    _, call_kwargs = self.mock_ax_scatter.call_args_list[0]
    np.testing.assert_array_almost_equal(call_kwargs["x"], costs)
    np.testing.assert_array_almost_equal(call_kwargs["y"],
                                         expected_coef_of_variation)

  def test_number_subplots_equals_number_channels(self):
    # The posterior figure must contain one axes per media channel, even when
    # n_columns does not divide n_channels evenly.
    n_channels = 4
    n_columns = 3
    mmm = lightweight_mmm.LightweightMMM()
    mmm.fit(
        media=jnp.ones((50, n_channels)),
        target=jnp.ones(50),
        total_costs=jnp.repeat(50, n_channels),
        number_warmup=5,
        number_samples=5,
        number_chains=1)
    fig = plot.plot_media_channel_posteriors(
        media_mix_model=mmm, n_columns=n_columns)
    self.assertLen(fig.get_axes(), n_channels)

  def test_unequal_length_ground_truth_and_predictions_raises_error(self):
    # Mismatched lengths between predictions and ground truth must be rejected.
    prediction = jnp.arange(10).reshape((5, 2))
    ground_truth = jnp.array([1, 2, 3])
    with self.assertRaises(ValueError):
      plot.plot_out_of_sample_model_fit(prediction, ground_truth)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
  absltest.main()
| |
from cog.forms import *
from cog.models import *
from cog.models.constants import LEAD_ORGANIZATIONAL_ROLES_DICT, \
ROLE_CATEGORY_LEAD, ROLE_CATEGORY_MEMBER, MANAGEMENT_BODY_CATEGORY_STRATEGIC, \
MANAGEMENT_BODY_CATEGORY_OPERATIONAL
from constants import PERMISSION_DENIED_MESSAGE
from django.contrib.auth.decorators import login_required
from django.forms.models import BaseInlineFormSet, inlineformset_factory
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseForbidden
from django.shortcuts import get_object_or_404, render
from django.template import RequestContext
from django.utils.functional import curry
from utils import getProjectNotActiveRedirect, getProjectNotVisibleRedirect
from cog.models.navbar import TABS, TAB_LABELS
from cog.views.views_templated import templated_page_display
from cog.models.auth import userHasAdminPermission
# management_body_update proj.short_name.lower category
def governance_display(request, project_short_name, tab):
    """
    Dispatcher for display of governance pages.

    :param request: the HTTP request
    :param project_short_name: short name identifying the target project
    :param tab: navigation tab key selecting which governance page to show
    :return: HTTP response produced by the templated page renderer
    """
    template_page = 'cog/governance/_governance.html'
    template_title = TAB_LABELS[tab]
    # Default to no form pages so an unrecognized tab no longer raises a
    # NameError when template_form_pages is referenced below.
    template_form_pages = {}
    if tab == TABS["GOVERNANCE"]:
        template_title = 'Governance Overview'
        template_form_pages = {reverse("governance_overview_update", args=[project_short_name]): 'Governance Overview'}
    elif tab == TABS["BODIES"]:
        template_form_pages = {reverse("management_body_update", args=[project_short_name, 'Strategic']):
                               'Strategic Bodies',
                               reverse("management_body_update", args=[project_short_name, 'Operational']):
                               'Operational Bodies'}
    elif tab == TABS["ROLES"]:
        template_form_pages = {reverse("organizational_role_update", args=[project_short_name]): 'Roles'}
    elif tab == TABS["PROCESSES"]:
        template_form_pages = {reverse("governance_processes_update", args=[project_short_name]): 'Processes'}
    elif tab == TABS["COMMUNICATION"]:
        template_form_pages = {reverse("communication_means_update", args=[project_short_name]): 'Communications'}
    return templated_page_display(request, project_short_name, tab, template_page, template_title, template_form_pages)
# view to update the project Management Body objects
@login_required
def management_body_update(request, project_short_name, category):
    """Update a project's Management Body objects for the given category."""
    # seed the ManagementBodyPurpose choices on first use
    if len(ManagementBodyPurpose.objects.all()) == 0:
        initManagementBodyPurpose()
    # pick the form/formset pair that limits the selectable purposes
    if category == MANAGEMENT_BODY_CATEGORY_STRATEGIC:
        form_class = StrategicManagementBodyForm
        formset_class = StrategicManagementBodyInlineFormset
    else:
        form_class = OperationalManagementBodyForm
        formset_class = OperationalManagementBodyInlineFormset
    tab = TABS["BODIES"]
    redirect = HttpResponseRedirect(reverse('governance_display', args=[project_short_name.lower(), tab]))
    # delegate to the generic governance-object view
    return governance_object_update(request, project_short_name, tab, ManagementBody, form_class, formset_class,
                                    '%s Management Bodies Update' % category,
                                    'cog/governance/management_body_form.html', redirect)
# view to update the project Communication Means objects
@login_required
def communication_means_update(request, project_short_name):
    """Update a project's internal Communication Means objects."""
    tab = TABS["COMMUNICATION"]
    redirect = HttpResponseRedirect(reverse('governance_display', args=[project_short_name.lower(), tab]))
    # delegate to the generic governance-object view
    return governance_object_update(request, project_short_name, tab, CommunicationMeans, CommunicationMeansForm,
                                    InternalCommunicationMeansInlineFormset, 'Communications Update',
                                    'cog/governance/communication_means_form.html', redirect)
@login_required
def governance_overview_update(request, project_short_name):
    """Display (GET) or persist (POST) the project's governance overview form."""
    # retrieve project from database
    project = get_object_or_404(Project, short_name__iexact=project_short_name)
    # check permission
    if not userHasAdminPermission(request.user, project):
        return HttpResponseForbidden(PERMISSION_DENIED_MESSAGE)
    # GET request
    if request.method == 'GET':
        # create form object from model and render it
        form = GovernanceOverviewForm(instance=project)
        return render_governance_overview_form(request, form, project)
    # POST request
    else:
        # update object from form data
        form = GovernanceOverviewForm(request.POST, instance=project)
        if form.is_valid():
            # persist changes, then redirect (GET-POST-REDIRECT)
            project = form.save()
            tab = 'governance'
            return HttpResponseRedirect(reverse('governance_display', args=[project.short_name.lower(), tab]))
        else:
            # parenthesized print: valid in both Python 2 and Python 3
            print('Form is invalid %s' % form.errors)
            return render_governance_overview_form(request, form, project)
class StrategicManagementBodyInlineFormset(BaseInlineFormSet):
    """Inline formset limited to the 'Strategic' ManagementBody rows of a project."""

    def get_queryset(self):
        # the parent class already sub-selects by instance=project;
        # additionally narrow to category='Strategic'
        project_rows = super(StrategicManagementBodyInlineFormset, self).get_queryset()
        return project_rows.filter(category='Strategic')
class OperationalManagementBodyInlineFormset(BaseInlineFormSet):
    """Inline formset limited to the 'Operational' ManagementBody rows of a project."""

    def get_queryset(self):
        # narrow the per-project queryset to category='Operational'
        project_rows = super(OperationalManagementBodyInlineFormset, self).get_queryset()
        return project_rows.filter(category='Operational')
class InternalCommunicationMeansInlineFormset(BaseInlineFormSet):
    """Inline formset limited to the internal CommunicationMeans rows of a project."""

    def get_queryset(self):
        # narrow the per-project queryset to internal=True
        project_rows = super(InternalCommunicationMeansInlineFormset, self).get_queryset()
        return project_rows.filter(internal=True)
# Generic view for updating a governance object.
def governance_object_update(request, project_short_name, tab, objectType, objectTypeForm, formsetType, title, template,
                             redirect):
    """
    Generic view for updating a governance object.

    The object type must have a 'project' attribute and a '__unicode__' method.
    """
    # retrieve project from database
    project = get_object_or_404(Project, short_name__iexact=project_short_name)
    # check permission
    if not userHasAdminPermission(request.user, project):
        return HttpResponseForbidden(PERMISSION_DENIED_MESSAGE)
    # initialize formset factory for this governance object
    ObjectFormSet = inlineformset_factory(Project, objectType, extra=1, form=objectTypeForm, formset=formsetType,
                                          fields="__all__")
    # GET request
    if request.method == 'GET':
        # create formset instance associated to current project
        formset = ObjectFormSet(instance=project)
        return render_governance_object_form(request, project, formset, title, template)
    # POST request
    else:
        # update formset from POST data
        formset = ObjectFormSet(request.POST, instance=project)
        if formset.is_valid():
            # save changes to database
            instances = formset.save()
            # set additional object flags
            # ('flags' renamed from 'dict' to avoid shadowing the builtin)
            for instance in instances:
                flags = {}
                if formsetType == InternalCommunicationMeansInlineFormset:
                    flags['internal'] = True
                instance.set_category(dict=flags)
                instance.save()
            # redirect to governance display (GET-POST-REDIRECT)
            return redirect
        else:
            # parenthesized print: valid in both Python 2 and Python 3
            print('Formset is invalid %s' % formset.errors)
            return render_governance_object_form(request, project, formset, title, template)
def render_governance_object_form(request, project, formset, title, template):
    """Render the generic governance-object formset page."""
    context = {'title': title, 'project': project, 'formset': formset}
    return render(request, template, context)
# view to update a Communication Means object members
@login_required
def communication_means_members(request, object_id):
    """Update the members of a CommunicationMeans object."""
    communication_means = get_object_or_404(CommunicationMeans, pk=object_id)
    tab = TABS["COMMUNICATION"]
    redirect = reverse('governance_display', args=[communication_means.project.short_name.lower(), tab])
    # delegate to the generic members view with the specific object types
    return members_update(request, tab, object_id, CommunicationMeans, CommunicationMeansMember,
                          CommunicationMeansMemberForm, redirect)
# view to update an Organizational Role object members
@login_required
def organizational_role_members(request, object_id):
    """Update the members of an OrganizationalRole object."""
    organizational_role = get_object_or_404(OrganizationalRole, pk=object_id)
    tab = TABS["ROLES"]
    redirect = reverse('governance_display', args=[organizational_role.project.short_name.lower(), tab])
    # delegate to the generic members view with the specific object types
    return members_update(request, tab, object_id, OrganizationalRole, OrganizationalRoleMember,
                          OrganizationalRoleMemberForm, redirect)
# view to update a Management Body object members
@login_required
def management_body_members(request, project_short_name, object_id):
    """Update the members of a ManagementBody object.

    Note: project_short_name is part of the URL signature but the project is
    derived from the ManagementBody instance itself.
    """
    management_body = get_object_or_404(ManagementBody, pk=object_id)
    tab = TABS["BODIES"]
    redirect = reverse('governance_display', args=[management_body.project.short_name.lower(), tab])
    # delegate to the generic members view with the specific object types
    return members_update(request, tab, object_id, ManagementBody, ManagementBodyMember, ManagementBodyMemberForm,
                          redirect)
#
# Generic view to update members for:
# -) objectType=CommunicationMeans, objectMemberType=CommunicationMeansMember
# -) objectType=ManagementBody, objectMemberType=ManagementBodyMember
#
def members_update(request, tab, objectId, objectType, objectMemberType, objectMemberForm, redirect):
    """
    Generic view to update the members of a governance object.

    The object type must have a 'project' attribute and a '__unicode__' method.
    """
    # retrieve governance object
    obj = get_object_or_404(objectType, pk=objectId)
    # check permission
    if not userHasAdminPermission(request.user, obj.project):
        return HttpResponseForbidden(PERMISSION_DENIED_MESSAGE)
    # formset factory; member choices limited to the project's users
    users_queryset = projectUsersQuerySet(obj.project)
    ObjectFormSet = inlineformset_factory(objectType, objectMemberType, form=objectMemberForm, extra=3,
                                          fields="__all__")
    # GET request
    if request.method == 'GET':
        # retrieve current members
        formset = ObjectFormSet(instance=obj)
        for form in formset.forms:
            form.fields['user'].queryset = users_queryset
        # render view
        return render_members_form(request, obj, formset, redirect)
    # POST request
    else:
        formset = ObjectFormSet(request.POST, instance=obj)
        if formset.is_valid():
            # save updated members, then redirect (GET-POST-REDIRECT)
            formset.save()
            return HttpResponseRedirect(redirect)
        else:
            # parenthesized print: valid in both Python 2 and Python 3
            print('Formset is invalid: %s' % formset.errors)
            # redisplay the form with errors
            return render_members_form(request, obj, formset, redirect)
def render_members_form(request, obj, formset, redirect):
    """Render the members-update form for a governance object.

    Note: the second parameter was renamed from 'object' to 'obj' to avoid
    shadowing the builtin; all call sites in this module pass it positionally.
    """
    return render(request,
                  'cog/governance/members_form.html',
                  {'title': '%s Members Update' % obj,
                   'project': obj.project,
                   'formset': formset, 'redirect': redirect})
@login_required
def processes_update(request, project_short_name):
    """Display (GET) or persist (POST) the project's governance processes form."""
    # retrieve project from database
    project = get_object_or_404(Project, short_name__iexact=project_short_name)
    # check permission
    if not userHasAdminPermission(request.user, project):
        return HttpResponseForbidden(PERMISSION_DENIED_MESSAGE)
    # GET request
    if request.method == 'GET':
        # create form object from model and render it
        form = GovernanceProcessesForm(instance=project)
        return render_governance_processes_form(request, form, project)
    # POST request
    else:
        # update object from form data
        form = GovernanceProcessesForm(request.POST, instance=project)
        if form.is_valid():
            # persist changes, then redirect (GET-POST-REDIRECT)
            project = form.save()
            tab = 'processes'
            return HttpResponseRedirect(reverse('governance_display', args=[project.short_name.lower(), tab]))
        else:
            # parenthesized print: valid in both Python 2 and Python 3
            print('Form is invalid %s' % form.errors)
            return render_governance_processes_form(request, form, project)
def render_governance_processes_form(request, form, project):
    """Render the governance-processes edit form."""
    context = {'title': 'Governance Processes Update', 'project': project, 'form': form}
    return render(request, 'cog/governance/governance_processes_form.html', context)
def render_governance_overview_form(request, form, project):
    """Render the governance-overview edit form."""
    context = {'title': 'Governance Overview Update', 'project': project, 'form': form}
    return render(request, 'cog/governance/governance_overview_form.html', context)
# Method to update an organizational role
@login_required
def organizational_role_update(request, project_short_name):
    """Display (GET) or persist (POST) the project's organizational roles formset."""
    # retrieve project from database
    project = get_object_or_404(Project, short_name__iexact=project_short_name)
    # check permission
    if not userHasAdminPermission(request.user, project):
        return HttpResponseForbidden(PERMISSION_DENIED_MESSAGE)
    # must build the formset via non-traditional means to pass the current project as a class attribute
    OrganizationalRoleFormSet = inlineformset_factory(Project, OrganizationalRole, extra=1, can_delete=True,
                                                      form=OrganizationalRoleForm, fields="__all__")
    # GET request
    if request.method == 'GET':
        # create formset backed up by current saved instances
        organizational_role_formset = OrganizationalRoleFormSet(instance=project)
        return render_organizational_role_form(request, project, organizational_role_formset)
    # POST request
    else:
        organizational_role_formset = OrganizationalRoleFormSet(request.POST, instance=project)
        if organizational_role_formset.is_valid():
            # save changes to database
            orgrole_instances = organizational_role_formset.save()
            # assign role category and save again
            for role in orgrole_instances:
                role.set_category()
                role.save()
            # redirect to governance display (GET-POST-REDIRECT)
            tab = 'roles'
            return HttpResponseRedirect(reverse('governance_display', args=[project.short_name.lower(), tab]))
        else:
            # parenthesized print: valid in both Python 2 and Python 3
            print('Organizational Role formset is invalid: %s' % organizational_role_formset.errors)
            # redisplay the form with errors
            return render_organizational_role_form(request, project, organizational_role_formset)
def render_organizational_role_form(request, project, formset):
    """Render the organizational-roles edit formset page."""
    context = {'title': 'Organizational Roles Update', 'project': project, 'formset': formset}
    return render(request, 'cog/governance/organizational_role_form.html', context)
| |
from abc import ABCMeta, abstractmethod
import math
import sys
from apsis.utilities import logging_utils
class ParamDef(object):
    """
    Base class for all parameter definitions.

    Every subclass must implement at least is_in_parameter_domain, plus the
    warp_in / warp_out / warped_size trio used to map parameter values onto a
    [0, 1] hypercube and back.
    """
    __metaclass__ = ABCMeta

    _logger = None

    def __init__(self):
        self._logger = logging_utils.get_logger(self)

    @abstractmethod
    def is_in_parameter_domain(self, value):
        """
        Test whether value lies in this definition's parameter domain.

        Note that the value tested here is NOT warped in; a warped-in value
        can be tested by checking whether it is in [0, 1].

        Parameters
        ----------
        value : object
            The candidate value.

        Returns
        -------
        is_in_parameter_domain : bool
            True iff value is in the domain defined by this instance.
        """
        pass

    def distance(self, valueA, valueB):
        """
        Return the distance between valueA and valueB.

        For the base class this is 0 iff the values are equal, 1 otherwise.
        """
        if valueA != valueB:
            self._logger.debug("Values are different; returning distance 1")
            return 1
        self._logger.debug("Values are identical; returning distance 0")
        return 0

    def to_dict(self):
        """
        Return a dictionary from which this parameter definition can be
        rebuilt (all attributes except the logger, plus a "type" entry).
        """
        self._logger.debug("Converting param_def to dict")
        result_dict = dict(self.__dict__)
        # the logger is not serializable state
        del result_dict["_logger"]
        result_dict["type"] = self.__class__.__name__
        self._logger.debug("Final converted param_def dict %s", result_dict)
        return result_dict

    @abstractmethod
    def warp_in(self, unwarped_value):
        """
        Warp a value into the [0, 1] hypercube, represented as a list.

        Parameters
        ----------
        unwarped_value :
            Value to warp in; must be in this definition's parameter domain.

        Returns
        -------
        warped_value : list of floats in [0, 1]
            The warped value; its length equals warped_size().
        """
        pass

    @abstractmethod
    def warp_out(self, warped_value):
        """
        Warp a [0, 1] hypercube position back to the corresponding value.

        Parameters
        ----------
        warped_value : list of floats in [0, 1]
            The warped value; its length equals warped_size().

        Returns
        -------
        unwarped_value :
            The corresponding value in this definition's parameter domain.
        """
        pass

    @abstractmethod
    def warped_size(self):
        """Return the length of this parameter's warped representation."""
        pass
class ComparableParamDef(object):
    """
    Mixin marking a parameter definition as ordinal: its values can be
    compared. Subclasses must implement compare_values.
    """
    __metaclass__ = ABCMeta

    @abstractmethod
    def compare_values(self, one, two):
        """
        Compare two values of this datatype with __cmp__ semantics.

        Parameters
        ----------
        one : object in parameter definition
            First value of the comparison.
        two : object in parameter definition
            Second value of the comparison.

        Returns
        -------
        comp : integer
            Negative iff one < two, zero iff one == two, positive iff
            one > two.
        """
        pass
class NominalParamDef(ParamDef):
    """
    A nominal (categorical) parameter definition.

    The domain is exactly the list of possible values given at construction;
    warping one-hot encodes a value over that list.
    """
    values = None

    def __init__(self, values):
        """
        Instantiate the NominalParamDef.

        Parameters
        ----------
        values : list
            All possible values this parameter may take.

        Raises
        ------
        ValueError
            Iff values is not a list, or is an empty list.
        """
        super(NominalParamDef, self).__init__()
        self._logger.debug("Initializing nominal param def with values %s",
                           values)
        if not isinstance(values, list):
            raise ValueError(
                "You created a NominalParameterDef object without "
                "specifying the possible values list.")
        if len(values) < 1:
            raise ValueError(
                "You need to specify a list of all possible values for this "
                "data type in order to make it being used for your "
                "optimization! The given list was empty: " + str(values)
            )
        self.values = values

    def is_in_parameter_domain(self, value):
        """Return True iff value is one of this definition's values."""
        self._logger.debug("Testing whether %s is in param domain", value)
        contained = value in self.values
        self._logger.debug("In param domain: %s", contained)
        return contained

    def warp_in(self, unwarped_value):
        """One-hot encode unwarped_value over self.values."""
        self._logger.debug("Warping in %s", unwarped_value)
        hot_index = self.values.index(unwarped_value)
        one_hot = [1 if i == hot_index else 0 for i in range(len(self.values))]
        self._logger.debug("Results in %s", one_hot)
        return one_hot

    def warp_out(self, warped_value):
        """Decode a warped vector to the value with the largest weight."""
        self._logger.debug("Warping out %s", warped_value)
        weights = list(warped_value)
        chosen = self.values[weights.index(max(weights))]
        self._logger.debug("Results in %s", chosen)
        return chosen

    def warped_size(self):
        """The warped representation has one slot per possible value."""
        size = len(self.values)
        self._logger.debug("Warped size: %s", size)
        return size
class OrdinalParamDef(NominalParamDef, ComparableParamDef):
    """
    An ordinal parameter definition.

    A list of possible values whose order is meaningful; the defined order is
    simply the order in which elements appear in the list.
    """

    def __init__(self, values):
        super(OrdinalParamDef, self).__init__(values)

    def compare_values(self, one, two):
        """
        Compare two values by their index in self.values (__cmp__ semantics).

        With a values list of [3, 5, 1, 4], '5' is considered smaller than
        '1' because the index of '1' is higher than the index of '5'.
        """
        self._logger.debug("Comparing %s and %s", one, two)
        if one not in self.values or two not in self.values:
            raise ValueError(
                "Values not comparable! Either one or the other is not in the "
                "values domain")
        index_one = self.values.index(one)
        index_two = self.values.index(two)
        if index_one < index_two:
            comparison = -1
        elif index_one > index_two:
            comparison = 1
        else:
            comparison = 0
        self._logger.debug("Results in %s", comparison)
        return comparison

    def distance(self, valueA, valueB):
        """
        Absolute difference of the values' list positions, divided by the
        number of values.

        NOTE(review): dividing by len(self.values) means the maximum possible
        distance is (n-1)/n rather than 1; confirm whether norming by (n-1)
        was intended before changing this.
        """
        self._logger.debug("Computing distance between %s and %s",
                           valueA, valueB)
        if valueA not in self.values or valueB not in self.values:
            raise ValueError(
                "Values not comparable! Either one or the other is not in the "
                "values domain")
        position_gap = abs(self.values.index(valueA) - self.values.index(valueB))
        dist = float(position_gap) / len(self.values)
        self._logger.debug("Distance is %s", dist)
        return dist
class NumericParamDef(ParamDef, ComparableParamDef):
    """
    A numeric parameter definition characterized by a warp_in/warp_out pair.

    warping_in squishes the parameter space onto [0, 1]; warping_out reverses
    it. The two must be mutual inverses:
        x = warp_in(warp_out(x)) for x in [0, 1], and
        x = warp_out(warp_in(x)) for x in the allowed parameter space.
    """
    warping_in = None
    warping_out = None

    def __init__(self, warping_in, warping_out):
        """
        Initialize the numeric parameter definition.

        Parameters
        ----------
        warping_in : function
            Maps a parameter value to the corresponding point in [0, 1].
        warping_out : function
            The inverse of warping_in.
        """
        super(NumericParamDef, self).__init__()
        self.warping_in = warping_in
        self.warping_out = warping_out

    def is_in_parameter_domain(self, value):
        """A value is in the domain iff its warped image lies in [0, 1]."""
        self._logger.debug("Testing whether %s is in param_domain", value)
        in_domain = 0 <= self.warp_in(value)[0] <= 1
        if in_domain:
            self._logger.debug("It is.")
        else:
            self._logger.debug("It is not.")
        return in_domain

    def warp_in(self, unwarped_value):
        """Warp a value into a one-element list in [0, 1]."""
        self._logger.debug("Warping %s in.", unwarped_value)
        warped = [self.warping_in(unwarped_value)]
        self._logger.debug("Results in %s", warped)
        return warped

    def warp_out(self, warped_value):
        """Warp a one-element [0, 1] list back to a parameter value."""
        self._logger.debug("Warping %s out", warped_value)
        unwarped = self.warping_out(warped_value[0])
        self._logger.debug("Warped out: %s", unwarped)
        return unwarped

    def warped_size(self):
        """Numeric parameters always warp to a single dimension."""
        self._logger.debug("Warped size is always 1.")
        return 1

    def compare_values(self, one, two):
        """Compare two in-domain values with __cmp__ semantics."""
        self._logger.debug("Comparing %s and %s", one, two)
        if not self.is_in_parameter_domain(one):
            raise ValueError("Parameter one = " + str(one) + " not in value "
                             "domain.")
        if not self.is_in_parameter_domain(two):
            raise ValueError("Parameter two = " + str(two) + " not in value "
                             "domain.")
        if one < two:
            comparison = -1
        elif one > two:
            comparison = 1
        else:
            comparison = 0
        self._logger.debug("Comparison is %s", comparison)
        return comparison

    def distance(self, valueA, valueB):
        """Signed difference between the warped images of valueB and valueA."""
        self._logger.debug("Computing distance between %s and %s", valueA,
                           valueB)
        if not self.is_in_parameter_domain(valueA):
            raise ValueError("Parameter one = " + str(valueA) + " not in value "
                             "domain.")
        if not self.is_in_parameter_domain(valueB):
            raise ValueError("Parameter two = " + str(valueB) + " not in value "
                             "domain.")
        dist = self.warp_in(valueB)[0] - self.warp_in(valueA)[0]
        self._logger.debug("Distance is %s", dist)
        return dist
class MinMaxNumericParamDef(NumericParamDef):
"""
Defines a numeric parameter definition defined by a lower and upper bound.
By default, it will represent a parameter space of [lower_bound,
upper_bound]. However, it can be set to exclude one or both of the bounds.
"""
lower_bound = None
upper_bound = None
include_lower = None
include_upper = None
epsilon = None
def __init__(self, lower_bound, upper_bound,
include_lower=True, include_upper=True, epsilon=None):
"""
Initializes the lower/upper bound defined parameter space.
Parameters
----------
lower_bound : float
The lowest possible value
upper_bound : float
The highest possible value
include_lower : bool, optional
If true (default), lower_bound is the smallest possible value that
can be returned. If false, all returned values will be greater than
lower_bound.
include_upper : bool, optional
If true (default), upper_bound is the greatest possible value that
can be returned. If false, all returned values will be less than
upper_bound.
epsilon : float, optional
The tolerance to use if excluding upper/lower. The lowest or
highest value will be epsilon away from the given lower or upper
bound. By default, this is ten times the system's float epsilon.
"""
self._logger = logging_utils.get_logger(self)
self._logger.debug("Initializing MinMaxParamDef. Parameters are "
"lower bound %s, upper_bound %s, include_lower %s,"
"include_upper %s and epsilon %s",
lower_bound, upper_bound, include_lower,
include_upper, epsilon)
try:
lower_bound = float(lower_bound)
upper_bound = float(upper_bound)
except:
raise ValueError("Bounds are not floats.")
if epsilon is None:
epsilon = sys.float_info.epsilon * 10
self.epsilon = epsilon
self.lower_bound = lower_bound
self.upper_bound = upper_bound
self.include_lower = include_lower
self.include_upper = include_upper
self._logger.debug("Initialized MinMaxParamDef.")
def warp_in(self, unwarped_value):
self._logger.debug("Warping in %s", unwarped_value)
modifed_lower = self.lower_bound + (0 if self.include_lower else self.epsilon )
modifed_upper = self.upper_bound - (0 if self.include_upper else self.epsilon )
result = ((unwarped_value - (modifed_lower))/
(modifed_upper-modifed_lower))
result = [float(result)]
self._logger.debug("Warped out to %s", result)
return result
def warp_out(self, warped_value):
    """Map a one-element warped list from [0, 1] back onto the bounds.

    The inverse of warp_in: exclusive bounds are tightened by
    ``self.epsilon`` and the unit-interval coordinate is rescaled.
    """
    self._logger.debug("Warping out %s", warped_value)
    lo = self.lower_bound + (0 if self.include_lower else self.epsilon)
    hi = self.upper_bound - (0 if self.include_upper else self.epsilon)
    span = hi - lo
    unwarped = float(lo + warped_value[0] * span)
    self._logger.debug("Warped out to %s", unwarped)
    return unwarped
def warped_size(self):
    """Number of hypercube dimensions used; always a single dimension."""
    self._logger.debug("Warped size is always 1.")
    return 1
def is_in_parameter_domain(self, value):
    """Return True iff ``value`` lies within the bounds, honouring the
    include_lower/include_upper inclusivity flags."""
    self._logger.debug("Testing whether %s is in parameter domain", value)
    above_lower = (self.lower_bound < value or
                   (self.include_lower and self.lower_bound <= value))
    if not above_lower:
        self._logger.debug("Is too small.")
        return False
    below_upper = (value < self.upper_bound or
                   (self.include_upper and value <= self.upper_bound))
    if not below_upper:
        self._logger.debug("Is too big.")
        return False
    self._logger.debug("Seems to fit.")
    return True
class PositionParamDef(OrdinalParamDef):
    """
    Defines positions for each of its values.
    """
    # Parallel to ``self.values`` (set by OrdinalParamDef.__init__, not
    # visible here): positions[i] is the numeric position of values[i].
    positions = None
    def __init__(self, values, positions):
        """
        Initializes PositionParamDef
        Parameters
        ----------
        values : list
            List of the values
        positions : list of floats
            The corresponding positions of these values. Has to have the same
            length as values.
        """
        assert len(values) == len(positions)
        super(PositionParamDef, self).__init__(values)
        self._logger.debug("Initializing position_param_def with values %s and"
                           "positions %s", values, positions)
        self.positions = positions
    def warp_in(self, unwarped_value):
        """Warp a value onto [0, 1] according to its stored position.

        The value's position is rescaled so that min(positions) maps to 0
        and max(positions) maps to 1; returned as a one-element list.
        """
        self._logger.debug("Warping in %s", unwarped_value)
        pos = self.positions[self.values.index(unwarped_value)]
        warped_value = float(pos - min(self.positions))/(max(self.positions) - min(self.positions))
        self._logger.debug("Warped into %s", [warped_value])
        return [warped_value]
    def warp_out(self, warped_value):
        """Map a warped one-element list back to the closest stored value."""
        self._logger.debug("Warping out %s", warped_value)
        warped_value = warped_value[0]
        # Out-of-range warped inputs clamp to the last/first value.
        if warped_value > 1:
            return self.values[-1]
        if warped_value < 0:
            return self.values[0]
        # Rescale back to position space, then pick the nearest position.
        pos = warped_value * (max(self.positions) - min(self.positions)) + min(self.positions)
        # Linear nearest-neighbour search; ties keep the earliest index
        # because only a strictly smaller distance replaces the candidate.
        min_pos_idx = 0
        for i, p in enumerate(self.positions):
            if abs(p - pos) < abs(self.positions[min_pos_idx] - pos):
                min_pos_idx = i
        result = self.values[min_pos_idx]
        self._logger.debug("Warped out to %s", result)
        return result
    def warped_size(self):
        """The warped representation always occupies a single dimension."""
        self._logger.debug("Warped size is always 1.")
        return 1
    def distance(self, valueA, valueB):
        """Absolute difference between the two values' positions.

        Raises
        ------
        ValueError
            If either value is not part of this parameter definition.
        """
        self._logger.debug("Computing distance between %s and %s", valueA,
                           valueB)
        if valueA not in self.values or valueB not in self.values:
            raise ValueError(
                "Values not comparable! Either one or the other is not in the "
                "values domain")
        pos_a = self.positions[self.values.index(valueA)]
        pos_b = self.positions[self.values.index(valueB)]
        diff = abs(pos_a - pos_b)
        self._logger.debug("Distance is %s", diff)
        return float(diff)
class FixedValueParamDef(PositionParamDef):
    """
    Extension of PositionParamDef, in which the position is equal to the value
    of each entry from values.
    """
    def __init__(self, values):
        """Initialize with positions identical to the values themselves.

        Parameters
        ----------
        values : list of numbers
            The possible values; each entry doubles as its own position.
        """
        # Idiom: a shallow copy replaces the former element-by-element
        # append loop; a copy (not the same list object) is kept so that
        # positions and values stay independent.
        positions = list(values)
        super(FixedValueParamDef, self).__init__(values, positions)
        self._logger.debug("Initialized FixedValue with %s", values)

    def to_dict(self):
        """Serialize to a dict from which this instance can be rebuilt."""
        param_dict = {"values": self.values,
                      "type": self.__class__.__name__}
        self._logger.debug("Converting to dict: %s", param_dict)
        return param_dict
class RangeParamDef(FixedValueParamDef):
    """
    Defines a parameter space equivalent to python's range argument.
    That is, the parameter space consists of a number of numbers starting
    with start (default is 0) followed by integers of the form start + i*step
    (with step defaulting to 1) for i = [1, 2, ...] and ending with the
    last value that's absolutely smaller than the abs of stop.
    Note that this can be slightly more flexible than python's range function:
    If we initialize it with ints=False no test for integers is done, meaning
    we can use it to generate non-integer sequences.
    """
    _start = None  # first value of the sequence (defaults to 0)
    _stop = None   # exclusive absolute bound of the sequence
    _step = None   # increment between consecutive values (defaults to 1)
    _ints = None   # if True, start/stop/step must all be integers
    def __init__(self, *args, **kwargs):
        """
        Initializes the RangeParamDef.
        Syntax is mostly as with python's range:
        - range(b): Iterate from 0 to b with a step size of 1.
        - range(a, b): Iterate from a to b with a step size of 1.
        - range(a, b, c): Iterate from a to b with a step size of c.
        However, an additional parameter allows us to change the behaviour
        away from python's range function to the equivalent function but
        allowing non-integer numbers. You can either call it with
        - range(a, b, c, BOOL) where BOOL is True or False (the default, True,
        means using normal python range behaviour) or by using ints=BOOL as a
        kwarg.
        - range(kwargs) where it receives exactly four kwargs (used for
        reconstruction).

        Raises
        ------
        ValueError
            On ambiguous or unknown arguments, or non-integer parameters
            while ints is True.
        """
        self._logger = logging_utils.get_logger(self)
        self._logger.debug("Building a RangeParamDef. *args are %s, *kwargs "
                           "%s", args, kwargs)
        self._start = 0
        self._stop = None
        self._step = 1
        self._ints = kwargs.get("ints", True)
        if len(args) == 4:
            self._start, self._stop, self._step, self._ints = args
            # BUGFIX: this previously raised when kwargs was EMPTY
            # (``if not kwargs:``), which rejected every legitimate plain
            # four-positional-argument call. Four positional arguments are
            # only ambiguous when kwargs are ALSO supplied.
            if kwargs:
                raise ValueError("Received four positional arguments plus at"
                                 "least one kwarg argument. Cannot guarantee"
                                 "unambiguity. args were %s, kwargs %s"
                                 %(args, kwargs))
        elif len(args) == 3:
            self._start, self._stop, self._step = args
        elif len(args) == 2:
            self._start, self._stop = args
        elif len(args) == 1:
            self._stop, = args
        else:
            # Reconstruction path: all four parameters must come as kwargs.
            # (The former nested len(kwargs) re-checks after this raise were
            # unreachable and have been removed.)
            if len(kwargs) != 4:
                raise ValueError("Did not receive the right amount of"
                                 "positional arguments. You can have "
                                 "1, 2, 3 or 4 positional arguments and no "
                                 "kwargs, 1-3 positional and the ints kwarg "
                                 "or exactly four kwargs.")
            self._start = kwargs.get("start")
            self._stop = kwargs.get("stop")
            self._step = kwargs.get("step")
            self._ints = kwargs.get("ints")
        if args and kwargs and set(kwargs) - set(["ints"]):
            # With 1-3 positional arguments only the ints kwarg is allowed.
            # (This validation existed before but sat on an unreachable
            # branch and never ran.)
            raise ValueError("Received unknown keyword argument. Only ints is"
                             "allowed. Received %s." %(kwargs))
        if self._ints:
            if not isinstance(self._start, (int, long)):
                raise ValueError("start is not an integer type but we are "
                                 "forced to only work on integers. Either "
                                 "change start or set ints to False.")
            if not isinstance(self._stop, (int, long)):
                raise ValueError("stop is not an integer type but we are "
                                 "forced to only work on integers. Either "
                                 "change stop or set ints to False.")
            if not isinstance(self._step, (int, long)):
                raise ValueError("step is not an integer type but we are "
                                 "forced to only work on integers. Either "
                                 "change step or set ints to False.")
        # Enumerate the sequence until the absolute value reaches |stop|.
        # NOTE(review): a step of 0 loops forever here -- presumably callers
        # never pass 0; confirm and guard if necessary.
        values = []
        cur_value = self._start
        while abs(cur_value) < abs(self._stop):
            values.append(cur_value)
            cur_value += self._step
        super(RangeParamDef, self).__init__(values)
        self._logger.debug("Finished RangeParamDef.")
    def to_dict(self):
        """Serialize the four constructor kwargs for later reconstruction."""
        param_dict = {"start": self._start,
                      "stop": self._stop,
                      "step": self._step,
                      "ints": self._ints,
                      "type": self.__class__.__name__}
        self._logger.debug("Converted to param_dict %s", param_dict)
        return param_dict
class EquidistantPositionParamDef(PositionParamDef):
    """
    Extension of PositionParamDef, in which the position of each value is
    equidistant from its neighbours and their order is determined by their
    order in values.
    """
    def __init__(self, values):
        """Place the values at evenly spaced positions on [0, 1].

        Parameters
        ----------
        values : list
            The possible values, in the order they should be positioned.
        """
        if len(values) > 1:
            # i/(n-1) puts the first value at 0.0 and the last at 1.0.
            positions = [float(i) / (len(values) - 1)
                         for i in range(len(values))]
        else:
            # BUGFIX: a single-element list previously divided by zero;
            # the lone value now sits at position 0.0 (and an empty list
            # yields no positions at all).
            positions = [0.0] * len(values)
        super(EquidistantPositionParamDef, self).__init__(values, positions)
class AsymptoticNumericParamDef(NumericParamDef):
    """
    This represents an asymptotic parameter definition.
    It consists of a fixed border - represented at 0 - and an asymptotic
    border - represented at 1.
    In general, multiplying the input parameter by 1/10th means a multiplication
    of the warped-in value by 1/2. This means that each interval between
    10^-i and 10^-(i-1) is represented by an interval of length 1/2^i on the
    hypercube.
    For example, assume that you want to optimize over a learning rate.
    Generally, they are close to 0, with parameter values (and therefore
    possible optimization values) like 10^-1, 10^-4 or 10^-6. This could be
    done by initializing this class with asymptotic_border = 0 and border = 1.
    Trying to optimize a learning rate decay - which normally is close to 1 -
    one could initialize this class with asymptotic_border = 1 and border = 0.
    Attributes
    ----------
    asymptotic_border : float
        The asymptotic border.
    border : float
        The non-asymptotic border.
    """
    asymptotic_border = None  # float: the asymptotically approached border
    border = None             # float: the exactly reachable border
    def __init__(self, asymptotic_border, border):
        """
        Initializes this parameter definition.

        NOTE(review): NumericParamDef.__init__ is not invoked here; this
        class overrides the warp methods instead -- confirm intended.

        Parameters
        ----------
        asymptotic_border : float
            The asymptotic border.
        border : float
            The non-asymptotic border.
        """
        self._logger = logging_utils.get_logger(self)
        self._logger.debug("Initializing asymptotic param def with asym_border"
                           " %s and border %s", asymptotic_border, border)
        self.asymptotic_border = float(asymptotic_border)
        self.border = float(border)
    def warp_in(self, unwarped_value):
        """Warp a value between the borders onto [0, 1] (one-element list).

        border maps to [0], asymptotic_border to [1]; out-of-range inputs
        are clamped to the nearer border first.
        """
        self._logger.debug("Warping in %s", unwarped_value)
        # Clamp into the closed interval spanned by the two borders.
        if not min(self.asymptotic_border, self.border) <= unwarped_value:
            unwarped_value = min(self.asymptotic_border, self.border)
        if not unwarped_value <= max(self.asymptotic_border, self.border):
            unwarped_value = max(self.asymptotic_border, self.border)
        if unwarped_value == self.border:
            self._logger.debug("Special case: Is border. Returning [0].")
            return [0]
        elif unwarped_value == self.asymptotic_border:
            self._logger.debug("Special case: Asymptotic border. Returning "
                               "[1]")
            return [1]
        # Each decade towards the asymptote halves the remaining hypercube
        # interval (see class docstring). NOTE(review): math.log needs
        # unwarped_value > 0 here -- presumably borders are chosen so the
        # open interval is positive; confirm for other configurations.
        warped_value = [(1-2**(math.log(unwarped_value, 10))) *
                        (self.border-self.asymptotic_border)+self.asymptotic_border]
        self._logger.debug("Normal case. Warped is %s", warped_value)
        return warped_value
    def warp_out(self, warped_value):
        """Inverse of warp_in: map a one-element [0, 1] list back to the
        original parameter scale."""
        self._logger.debug("Warping out %s", warped_value)
        warped_value_single = warped_value[0]
        # Clamp the hypercube coordinate into [0, 1].
        if warped_value_single < 0:
            warped_value_single = 0
        if warped_value_single > 1:
            warped_value_single = 1
        if warped_value_single == 1:
            self._logger.debug("Special case: Value was 1, therefore "
                               "asymptotic border.")
            return self.asymptotic_border
        elif warped_value_single == 0:
            self._logger.debug("Special case: Value was 0, therefore "
                               "border.")
            return self.border
        # Algebraic inverse of the warp_in formula.
        unwarped_value = 10**math.log(1-(warped_value_single-
                                         self.asymptotic_border)/
                                      (self.border-self.asymptotic_border), 2)
        self._logger.debug("Normal case. Warped out is %s", unwarped_value)
        return unwarped_value
    def warped_size(self):
        """The warped representation always occupies a single dimension."""
        self._logger.debug("Warped size is always 1.")
        return 1
| |
""" @file L6470_driver.py
This module implements the command set of the L6470 stepper driver.
@authors Anthony Lombardi
@authors John Barry
@date 8 December 2016
"""
from math import ceil as math_ceil
class L6470:
    """ @details This class represents an L6470 stepper driver.
    Contains representations of all valid commands,
    as well as dictionaries for the register addresses
    and the contents of the STATUS register.
    """
    # === DICTIONARIES ===
    # Register map: NAME -> [address, bit length].
    # Comment columns: description | reset value | writability, where
    # X = unreadable, W = writable (always), S = writable (when stopped),
    # H = writable (when bridges are Hi-Z), R = read only.
    REGISTER_DICT = {
        #                ADDR  LEN     DESCRIPTION      | xRESET | Write
        'ABS_POS':      [0x01, 22],  # current pos      | 000000 | S
        'EL_POS':       [0x02,  9],  # Electrical pos   | 000    | S
        'MARK':         [0x03, 22],  # mark position    | 000000 | W
        'SPEED':        [0x04, 20],  # current speed    | 00000  | R
        'ACC':          [0x05, 12],  # accel limit      | 08A    | W
        'DEC':          [0x06, 12],  # decel limit      | 08A    | W
        'MAX_SPEED':    [0x07, 10],  # maximum speed    | 041    | W
        'MIN_SPEED':    [0x08, 13],  # minimum speed    | 0      | S
        'FS_SPD':       [0x15, 10],  # full-step speed  | 027    | W
        'KVAL_HOLD':    [0x09,  8],  # holding Kval     | 29     | W
        'KVAL_RUN':     [0x0A,  8],  # const speed Kval | 29     | W
        'KVAL_ACC':     [0x0B,  8],  # accel start Kval | 29     | W
        'KVAL_DEC':     [0x0C,  8],  # decel start Kval | 29     | W
        'INT_SPEED':    [0x0D, 14],  # intersect speed  | 0408   | H
        'ST_SLP':       [0x0E,  8],  # start slope      | 19     | H
        'FN_SLP_ACC':   [0x0F,  8],  # accel end slope  | 29     | H
        'FN_SLP_DEC':   [0x10,  8],  # decel end slope  | 29     | H
        'K_THERM':      [0x11,  4],  # therm comp factr | 0      | H
        'ADC_OUT':      [0x12,  5],  # ADC output       | XX     |
        'OCD_TH':       [0x13,  4],  # OCD threshold    | 8      | W
        'STALL_TH':     [0x14,  7],  # STALL threshold  | 40     | W
        'STEP_MODE':    [0x16,  8],  # Step mode        | 7      | H
        'ALARM_EN':     [0x17,  8],  # Alarm enable     | FF     | S
        'CONFIG':       [0x18, 16],  # IC configuration | 2E88   | H
        'STATUS':       [0x19, 16],  # Status           | XXXX   |
        'RESERVED A':   [0x1A,  0],  # RESERVED         |        | X
        'RESERVED B':   [0x1B,  0],  # RESERVED         |        | X
    }

    # STATUS register layout: bit index -> [flag NAME, OK/default value].
    # Contains all error flags as well as basic motor state information.
    STATUS_DICT = {
        14: ['STEP_LOSS_B', 1],  # stall detection on bridge B
        13: ['STEP_LOSS_A', 1],  # stall detection on bridge A
        12: ['OVERCURRENT', 1],  # OCD, overcurrent detection
        11: ['HEAT_SHUTDN', 1],  # TH_SD, thermal shutdown
        10: ['HEAT_WARN ', 1],   # TH_WN, thermal warning
        9:  ['UNDERVOLT ', 1],   # UVLO, low drive supply voltage
        8:  ['WRONG_CMD ', 0],   # Unknown command
        7:  ['NOTPERF_CMD', 0],  # Command can't be performed
        3:  ['SWITCH_EDGE', 0],  # SW_EVN, signals switch falling edge
        2:  ['SWITCH_FLAG', 0],  # switch state. 0=open, 1=grounded
        15: ['STEPCK_MODE', 0],  # 1=step-clock mode, 0=normal
        4:  ['DIRECTION', 1],    # 1=forward, 0=reverse
        6:  ['MOTOR_STAT', 0],   # two bits: 00=stopped, 01=accel,
                                 # 10=decel, 11=const spd
        1:  ['BUSY', 1],         # low during movement commands
        0:  ['Hi-Z', 1],         # 1=hi-Z, 0=motor active
    }

    # === CORE FUNCTIONS ===
    def __init__(self, spi_handler):
        """ Create a new L6470 object.
        @arg @c spi_handler The SPI device to use; must provide a
             send_recieve(value, send_len, recv_len) method (works with
             stmspi::SPIDevice or stmspi::DummyBus).
        """
        self.spi = spi_handler
        if not hasattr(self.spi, 'send_recieve'):
            print('Invalid SPI object.')
            raise AttributeError('SPI object has no send_recieve method.')

    def __del__(self):
        """ Automatically called when the instance is deleted.
        Stops the attached motor as a safety precaution.
        """
        # Guard: __init__ may have raised before a usable SPI was attached,
        # in which case there is nothing to stop (and no way to stop it).
        if hasattr(self, 'spi') and hasattr(self.spi, 'send_recieve'):
            self.HardHiZ()  # stop motors ASAP, for safety

    # === L6470 FUNCTION WRAPPERS ===
    def Nop(self):
        """ No-Operation command. The driver will not react.
        """
        self.spi.send_recieve(0, 1, 0)

    def SetParam(self, register, value):
        """ Writes the value <value> to the register named <register>.
        @arg @c register (string): A name corresponding to an entry in REGISTER_DICT.
        @arg @c value (int): The new value to write to that register.
        """
        regdata = L6470.REGISTER_DICT[register]
        # Registers hold 1-22 bits but transfers happen in whole bytes.
        send_len = math_ceil(regdata[1] / 8)
        self.spi.send_recieve(0b00000000 + regdata[0], 1, 0)
        self.spi.send_recieve(value, send_len, 0)

    def GetParam(self, register):
        """ Reads the value of the register named <register>.
        @arg @c register (string): A name corresponding to an entry in REGISTER_DICT.
        @return @c value (byte array): The contents of the selected register.
        """
        regdata = L6470.REGISTER_DICT[register]
        cmd = 0b00100000 + regdata[0]
        send_len = math_ceil(regdata[1] / 8)
        recv_len = send_len
        value = self.spi.send_recieve(cmd, send_len, recv_len)
        return value

    def Run(self, speed, direction):
        """ Sets the target <speed> and <direction>. BUSY flag is low until the
        speed target is reached, or the motor hits MAX/MIN_SPEED. Can be
        given at any time and runs immediately.
        @arg @c speed (int): The target speed. Must be positive.
        @arg @c direction (int): Direction to rotate. Must be 1 or 0.
        @returns @c -1 if the direction or speed were invalid.
        @returns @c 0 if the command ran successfully.
        """
        if (direction != 1) and (direction != 0):
            return -1  # invalid argument
        if speed < 0:
            return -1  # invalid argument
        self.spi.send_recieve(0b01010000 + direction, 1, 0)
        self.spi.send_recieve(speed, 3, 0)
        return 0

    def StepClock(self, direction):
        """ Puts the device into step-clock mode and imposes <direction>.
        Raises STEPCK_MODE flag and motor is always considered stopped.
        Mode will exit if a constant speed, absolute position, or motion
        command is issued. Direction can be changed without exiting
        step-clock mode by calling StepClock again with the new direction.
        BUSY does not go low in this mode, but the command can only be
        called when the motor is stopped - NOTPERF_CMD raises otherwise.
        @arg @c direction (int): Direction to rotate. Must be 1 or 0.
        @returns @c -1 if the direction argument was invalid.
        @returns @c 0 if the command ran successfully.
        """
        if (direction != 1) and (direction != 0):
            return -1  # unpermitted behavior
        self.spi.send_recieve(0b01011000 + direction, 1, 0)
        return 0  # BUGFIX: the documented success code was never returned

    def Move(self, steps, direction):
        """ Moves a number of microsteps in a given direction. The units of
        <steps> are determined by the selected step mode. The BUSY flag
        goes low until all steps have happened. This command cannot be
        run while the motor is running - NOTPERF_CMD raises otherwise.
        @arg @c steps (int): the number of (micro)steps to perform.
        @arg @c direction (int): The direction to rotate. Must be 1 or 0.
        @returns @c -1 if the direction argument was invalid.
        @returns @c 0 if the command ran successfully.
        """
        if (direction != 1) and (direction != 0):
            return -1  # unpermitted behavior
        self.spi.send_recieve(0b01000000 + direction, 1, 0)
        self.spi.send_recieve(steps, 3, 0)
        return 0  # BUGFIX: the documented success code was never returned

    def GoTo(self, position):
        """ Brings motor to the step count of <position> via the minimum path.
        The units of <position> are determined by the selected step mode.
        The BUSY flag goes low until the position is reached. This command
        can only be run if the motor is stopped - NOTPERF_CMD raises
        otherwise.
        @arg @c position (int): the absolute position to rotate to.
        """
        self.spi.send_recieve(0b01100000, 1, 0)
        self.spi.send_recieve(position, 3, 0)

    def GoTo_DIR(self, position, direction):
        """ Brings motor to the step count of <position>, forcing <direction>.
        Works the same way GoTo() does, but rotation is in the given
        direction rather than along the minimum path.
        @arg @c position (int): the absolute position to rotate to.
        @arg @c direction (int): the direction to rotate in. Must be 1 or 0.
        @returns @c -1 if the direction argument was invalid.
        @returns @c 0 if the command ran successfully.
        """
        if (direction != 1) and (direction != 0):
            return -1  # unpermitted behavior
        cmd = 0b01101000 + direction
        self.spi.send_recieve(cmd, 1, 0)
        self.spi.send_recieve(position, 3, 0)
        return 0  # BUGFIX: the documented success code was never returned

    def GoUntil(self, speed, action, direction):
        """ Performs a motion in <direction> at <speed> until Switch is closed,
        then performs <action> followed by a SoftStop. If the SW_MODE bit
        in the CONFIG register is set low, a HardStop is performed instead.
        Pulls BUSY low until the switch-on event occurs. Can be given
        anytime and immediately executes.
        @arg @c speed (int): the speed to rotate at. Must be positive.
        @arg @c action (int): 0 = reset ABS_POS register, 1 = copy ABS_POS into MARK.
        @arg @c direction (int): the direction to rotate in. Must be 1 or 0.
        @returns @c -1 if the direction or action argument was invalid.
        @returns @c 0 if the command ran successfully.
        """
        if (action != 1) and (action != 0):
            return -1  # unpermitted behavior
        if (direction != 1) and (direction != 0):
            return -1  # unpermitted behavior
        # BUGFIX: the GoUntil opcode is 0b10000010 (0x82) per the datasheet;
        # the old 0b01000010 fell into the Move opcode family. Also, the
        # commanded speed was accepted but never transmitted.
        self.spi.send_recieve(0b10000010 + (action << 3) + direction, 1, 0)
        self.spi.send_recieve(speed, 3, 0)
        return 0

    def ReleaseSW(self, action, direction):
        """ Performs a motion in <direction> at minimum speed until Switch is
        released (open), then performs <action> followed by a HardStop.
        If the minimum speed is less than 5 step/s or low speed
        optimization is enabled, the motor turns at 5 step/s. Keeps the
        BUSY flag low until the switch is released and the motor stops.
        @arg @c action (int): 0 = reset ABS_POS register, 1 = copy ABS_POS into MARK.
        @arg @c direction (int): the direction to rotate in. Must be 1 or 0.
        @returns @c -1 if the direction or action argument was invalid.
        @returns @c 0 if the command ran successfully.
        """
        if (action != 1) and (action != 0):
            return -1  # unpermitted behavior
        if (direction != 1) and (direction != 0):
            return -1  # unpermitted behavior
        # BUGFIX: the ReleaseSW opcode is 0b10010010 (0x92) per the
        # datasheet; the old 0b01110000 is GoHome and would have driven
        # the motor to ABS_POS == 0 instead.
        self.spi.send_recieve(0b10010010 + (action << 3) + direction, 1, 0)
        return 0

    def GoHome(self):
        """ Brings the motor to the HOME position (ABS_POS == 0) via the
        shortest path; equivalent to GoTo(0). If a direction is mandatory,
        use GoTo_DIR(). Keeps the BUSY flag low until the home position is
        reached. Can be given only when the previous command is completed -
        if BUSY is low when called, NOTPERF_CMD will raise.
        """
        self.spi.send_recieve(0b01110000, 1, 0)

    def GoMark(self):
        """ Brings the motor to the MARK position via the minimum path;
        equivalent to GoTo with the value of the MARK register. Use
        GoTo_DIR() if a direction is mandatory.
        """
        self.spi.send_recieve(0b01111000, 1, 0)

    def ResetPos(self):
        """ Resets the ABS_POS register to zero (ie, sets HOME position).
        """
        self.spi.send_recieve(0b11011000, 1, 0)

    def ResetDevice(self):
        """ Resets the L6470 chip to power-up conditions.
        """
        self.spi.send_recieve(0b11000000, 1, 0)

    def SoftStop(self):
        """ Stops the motor, using the value of the DEC register as the
        deceleration. When the bridges are in Hi-Z, this command will exit
        the Hi-Z state without performing any motion. Can be run any time
        and runs immediately - BUSY is held low until the motor stops.
        """
        self.spi.send_recieve(0b10110000, 1, 0)

    def HardStop(self):
        """ Stops the motor immediately, with infinite deceleration. Interacts
        with the Hi-Z state and the BUSY flag just like SoftStop().
        """
        self.spi.send_recieve(0b10111000, 1, 0)

    def SoftHiZ(self):
        """ Puts bridges into Hi-Z after a deceleration phase using the value
        of the DEC register. Can be run at any time and is immediately
        executed, holding BUSY low until the motor stops.
        """
        self.spi.send_recieve(0b10100000, 1, 0)

    def HardHiZ(self):
        """ Puts bridges into Hi-Z immediately, ignoring the DEC parameter.
        Can be run any time and immediately executes, holding BUSY low
        until the motor stops.
        """
        self.spi.send_recieve(0b10101000, 1, 0)

    def GetStatus(self, verbose=0):
        """ Returns the value of the STATUS register, and forces the system to
        exit from any error state. Does not reset the Hi-Z or BUSY flags.
        @arg @c verbose (optional int): If nonzero, pretty-print the status.
        @return @c status (int): the two-byte value of the register.
        """
        status = self.spi.send_recieve(0b11010000, 1, 2)
        if verbose:
            self.print_status(status)
        return status

    def print_status(self, status):
        """ Formatted printing of status codes for the driver.
        @arg @c status (int): the code returned by a GetStatus call.
        """
        # check error flags (bits 7-14)
        print("Driver Status: ")
        for bit_addr in range(7, 15):
            # BUGFIX: this line used 'end==""' - a comparison against the
            # undefined name 'end' (NameError at runtime) - instead of the
            # intended 'end=""' keyword argument that suppresses the newline.
            print(" Flag ", self.STATUS_DICT[bit_addr][0], ": ", end="")
            # we shift a 1 to the bit address, then shift the result down again
            if ((status & 1 << bit_addr) >> bit_addr) == self.STATUS_DICT[bit_addr][1]:
                # the result should either be a 1 or 0. Which is 'ok' depends.
                print("ok")
            else:
                print("Alert!")
        # check SCK_MOD
        if status & (1 << 15):
            print(" Step-clock mode is on.")
        else:
            print(" Step-clock mode is off.")
        # check MOT_STATUS (two bits: 00=stopped, 01=accel, 10=decel, 11=const)
        if status & (1 << 6):
            if status & (1 << 5):
                print(" Motor is at constant speed.")
            else:
                print(" Motor is decelerating.")
        else:
            if status & (1 << 5):
                print(" Motor is accelerating.")
            else:
                print(" Motor is stopped.")
        # check DIR
        if status & (1 << 4):
            print(" Motor direction is set to forward.")
        else:
            print(" Motor direction is set to reverse.")
        # check BUSY (active low)
        if not (status & (1 << 1)):
            print(" Motor is busy with a movement command.")
        else:
            print(" Motor is ready to recieve movement commands.")
        # check HiZ
        if status & 1:
            print(" Bridges are in high-impedance mode (disabled).")
        else:
            print(" Bridges are in low-impedance mode (active).")
        # check SW_EVN flag
        if status & (1 << 3):
            print(" External switch has been clicked since last check.")
        else:
            print(" External switch has no activity to report.")
        # check SW_F
        if status & (1 << 2):
            print(" External switch is closed (grounded).")
        else:
            print(" External switch is open.")
| |
"""Non-interactive points marking local plugin for Ginga."""
from __future__ import absolute_import, division, print_function
from ginga.util.six import iteritems, itervalues
from ginga.util.six.moves import map, zip
# STDLIB
import re
import os
from collections import defaultdict
# THIRD-PARTY
import numpy as np
from astropy.table import Table
# GINGA
from ginga import colors
from ginga.GingaPlugin import LocalPlugin
from ginga.gw import Widgets
from ginga.misc import Bunch
# Need this for API doc to build without warning
try:
from ginga.gw.GwHelp import FileSelection
except ImportError:
pass
__all__ = []
class TVMark(LocalPlugin):
"""Mark points from file (non-interative mode) on an image."""
def __init__(self, fv, fitsimage):
    """Initialize plugin state, user preferences, result containers and
    the drawing canvas.

    Parameters
    ----------
    fv
        The Ginga shell; supplies preferences, draw classes and callbacks.
    fitsimage
        The image viewer widget this local plugin operates on.
    """
    # superclass defines some variables for us, like logger
    super(TVMark, self).__init__(fv, fitsimage)
    self.layertag = 'tvmark-canvas'
    self.marktag = None    # canvas tag of the currently drawn markings
    self.markhltag = None  # canvas tag of the selection highlight overlay
    self._mark_options = ['box', 'circle', 'cross', 'plus', 'point']
    self._color_options = self._short_color_list()
    self._dwidth = 2  # Additional width to highlight selection
    # User preferences. Some are just default values and can also be
    # changed by GUI.
    prefs = self.fv.get_preferences()
    self.settings = prefs.createCategory('plugin_TVMark')
    self.settings.load(onError='silent')
    self.marktype = self.settings.get('marktype', 'circle')
    self.markcolor = self.settings.get('markcolor', 'green')
    self.marksize = self.settings.get('marksize', 5)
    self.markwidth = self.settings.get('markwidth', 1)
    # 0- or 1-indexed convention used when displaying X/Y coordinates.
    self.pixelstart = self.settings.get('pixelstart', 1)
    self.use_radec = self.settings.get('use_radec', True)
    self.extra_columns = self.settings.get('extra_columns', [])
    # Display coords info table (the three tables are built in build_gui)
    self.treeview = None     # 'Shown' table
    self.treeviewsel = None  # 'Selected' table
    self.treeviewbad = None  # 'Outliers' table
    self.tree_dict = Bunch.caselessDict()
    self.columns = [('No.', 'MARKID'), ('RA', 'RA'), ('DEC', 'DEC'),
                    ('X', 'X'), ('Y', 'Y')]
    # Append extra columns to table header
    self.columns += [(colname, colname) for colname in self.extra_columns]
    # Store results
    self.coords_dict = defaultdict(list)
    self._xarr = []       # X positions of successfully drawn marks
    self._yarr = []       # Y positions of successfully drawn marks
    self._treepaths = []  # (key, seqno) paths into the 'Shown' tree
    self.dc = self.fv.get_draw_classes()
    canvas = self.dc.DrawingCanvas()
    canvas.enable_draw(True)
    canvas.enable_edit(False)
    # Drawing a rectangle selects rows; clicking highlights a single mark.
    canvas.set_callback('draw-event', self.hl_canvas2table_box)
    canvas.set_callback('cursor-down', self.hl_canvas2table)
    canvas.set_surface(self.fitsimage)
    canvas.set_drawtype('rectangle', color='green', linestyle='dash')
    self.canvas = canvas
    fv.add_callback('remove-image', lambda *args: self.redo())
    self.gui_up = False
# If user complains about lack of choices (!!!), we can remove this.
def _short_color_list(self):
    """Return Ginga's color names, minus the numbered shade variants."""
    has_digit = re.compile(r'\d').search
    return [name for name in colors.get_colors() if not has_digit(name)]
def build_gui(self, container):
    """Construct the plugin GUI inside ``container``.

    Layout, top to bottom: instructions expander, mark parameter
    controls, a tabbed set of three result tables (Shown / Selected /
    Outliers), summary counters, action buttons, and a Close row.
    Sets ``self.gui_up`` and triggers an initial redo() to populate.
    """
    vbox, sw, self.orientation = Widgets.get_oriented_box(container)
    # NOTE(review): vbox/sw appear unused; widgets are added directly to
    # ``container`` below -- confirm whether they are needed.
    msg_font = self.fv.get_font('sansFont', 12)
    tw = Widgets.TextArea(wrap=True, editable=False)
    tw.set_font(msg_font)
    self.tw = tw
    fr = Widgets.Expander('Instructions')
    fr.set_widget(tw)
    container.add_widget(fr, stretch=0)
    # Mark parameter controls (type/color/size/width).
    captions = (('Mark:', 'label', 'mark type', 'combobox'),
                ('Color:', 'label', 'mark color', 'combobox'),
                ('Size:', 'label', 'mark size', 'entry'),
                ('Width:', 'label', 'mark width', 'entry'))
    w, b = Widgets.build_info(captions)
    self.w.update(b)
    combobox = b.mark_type
    for name in self._mark_options:
        combobox.append_text(name)
    b.mark_type.set_index(self._mark_options.index(self.marktype))
    b.mark_type.add_callback('activated', self.set_marktype_cb)
    combobox = b.mark_color
    for name in self._color_options:
        combobox.append_text(name)
    b.mark_color.set_index(self._color_options.index(self.markcolor))
    b.mark_color.add_callback('activated', self.set_markcolor_cb)
    b.mark_size.set_tooltip('Size/radius of the marking')
    b.mark_size.set_text(str(self.marksize))
    b.mark_size.add_callback('activated', lambda w: self.set_marksize())
    b.mark_width.set_tooltip('Line width of the marking')
    b.mark_width.set_text(str(self.markwidth))
    b.mark_width.add_callback('activated', lambda w: self.set_markwidth())
    container.add_widget(w, stretch=0)
    # Tabbed result tables.
    nb = Widgets.TabWidget()
    self.w.nb1 = nb
    container.add_widget(nb, stretch=1)
    # 'Shown' table: all markings drawn on the image; selectable.
    treeview = Widgets.TreeView(auto_expand=True,
                                sortable=True,
                                selection='multiple',
                                use_alt_row_color=True)
    self.treeview = treeview
    treeview.setup_table(self.columns, 2, 'MARKID')
    treeview.add_callback('selected', self.hl_table2canvas)
    nb.add_widget(treeview, title='Shown')
    # 'Selected' table: subset of shown markings currently selected.
    treeview2 = Widgets.TreeView(auto_expand=True,
                                 sortable=True,
                                 use_alt_row_color=True)
    self.treeviewsel = treeview2
    treeview2.setup_table(self.columns, 2, 'MARKID')
    nb.add_widget(treeview2, title='Selected')
    # 'Outliers' table: coordinates that fell outside the image.
    treeview3 = Widgets.TreeView(auto_expand=True,
                                 sortable=True,
                                 use_alt_row_color=True)
    self.treeviewbad = treeview3
    treeview3.setup_table(self.columns, 2, 'MARKID')
    nb.add_widget(treeview3, title='Outliers')
    # Summary counters row.
    captions = (('Loaded:', 'llabel', 'ntotal', 'llabel',
                 'Shown:', 'llabel', 'nshown', 'llabel',
                 'Selected:', 'llabel', 'nselected', 'llabel'), )
    w, b = Widgets.build_info(captions)
    self.w.update(b)
    b.ntotal.set_tooltip('Number of objects read from tables')
    b.ntotal.set_text('0')
    b.nshown.set_tooltip('Number of objects shown on image')
    b.nshown.set_text('0')
    b.nselected.set_tooltip('Number of objects selected')
    b.nselected.set_text('0')
    container.add_widget(w, stretch=0)
    # Action buttons row.
    captions = (('Load Coords', 'button', 'Use RADEC', 'checkbutton'),
                ('Show', 'button', 'Hide', 'button', 'Forget', 'button'))
    w, b = Widgets.build_info(captions)
    self.w.update(b)
    b.load_coords.set_tooltip('Load coordinates file')
    b.load_coords.add_callback('activated', lambda w: self.load_coords_cb())
    b.use_radec.set_tooltip('Use RA/DEC as coordinates instead of X/Y')
    b.use_radec.set_state(self.use_radec)
    b.use_radec.add_callback('activated', self.set_coordtype_cb)
    b.show.set_tooltip('Show markings')
    b.show.add_callback('activated', lambda w: self.redo())
    b.hide.set_tooltip('Hide markings')
    b.hide.add_callback('activated', lambda w: self.clear_marking())
    b.forget.set_tooltip('Forget markings')
    b.forget.add_callback('activated', lambda w: self.forget_coords())
    container.add_widget(w, stretch=0)
    # Close button row.
    btns = Widgets.HBox()
    btns.set_border_width(4)
    btns.set_spacing(3)
    btn = Widgets.Button('Close')
    btn.add_callback('activated', lambda w: self.close())
    btns.add_widget(btn, stretch=0)
    btns.add_widget(Widgets.Label(''), stretch=1)
    container.add_widget(btns, stretch=0)
    self.gui_up = True
    # Initialize coordinates file selection dialog
    self.cfilesel = FileSelection(self.fv.w.root.get_widget())
    # Populate table
    self.redo()
def instructions(self):
    """Populate the Instructions expander with the plugin usage text."""
    self.tw.set_text("""Set mark parameters. Then, load coordinates file to mark them on image with the specified marking. To add different kind of marking, change the mark parameters and load another file.
Press "Hide" to clear all markings (does not clear memory). Press "Show" to replot them. Press "Forget" to forget all markings in memory.
""")
def redo(self):
"""Image or coordinates have changed. Clear and redraw."""
if not self.gui_up:
return
self.clear_marking()
self.tree_dict = Bunch.caselessDict()
self.treeviewbad.clear()
bad_tree_dict = Bunch.caselessDict()
nbad = 0
self._xarr = []
self._yarr = []
self._treepaths = []
image = self.fitsimage.get_image()
if image is None:
return
if not hasattr(image, 'radectopix'):
self.logger.error(
'Image as no radectopix() method for coordinates conversion')
return
objlist = []
seqno = 1
max_x = image.width - 1
max_y = image.height - 1
for key, coords in iteritems(self.coords_dict):
if len(coords) == 0:
continue
marktype, marksize, markcolor = key
kstr = ','.join(map(str, key))
sub_dict = {}
bad_sub_dict = {}
self.tree_dict[kstr] = sub_dict
bad_tree_dict[kstr] = bad_sub_dict
for args in coords:
ra, dec, x, y = args[:4]
# Use X and Y positions directly. Convert to RA and DEC (deg).
if ra is None or dec is None:
ra, dec = image.pixtoradec(x, y)
# RA and DEC already in degrees. Convert to pixel X and Y.
else:
x, y = image.radectopix(ra, dec)
# Display original X/Y (can be 0- or 1-indexed) using
# our internal 0-indexed values.
xdisp = x + self.pixelstart
ydisp = y + self.pixelstart
seqstr = '{0:04d}'.format(seqno) # Prepend 0s for proper sort
bnch = Bunch.Bunch(zip(self.extra_columns, args[4:])) # Extra
bnch.update(Bunch.Bunch(MARKID=seqstr, RA=ra, DEC=dec,
X=xdisp, Y=ydisp))
# Do not draw out of bounds
if (not np.isfinite(x) or x < 0 or x > max_x or
not np.isfinite(y) or y < 0 or y > max_y):
self.logger.debug('Ignoring RA={0}, DEC={1} '
'(x={2}, y={3})'.format(ra, dec, x, y))
bad_sub_dict[seqstr] = bnch
nbad += 1
# Display point
else:
obj = self._get_markobj(
x, y, marktype, marksize, markcolor, self.markwidth)
objlist.append(obj)
sub_dict[seqstr] = bnch
self._xarr.append(x)
self._yarr.append(y)
self._treepaths.append((kstr, seqstr))
seqno += 1
n_obj = len(objlist)
self.logger.debug('Displaying {0} markings'.format(n_obj))
if nbad > 0:
self.treeviewbad.set_tree(bad_tree_dict)
if n_obj == 0:
return
# Convert to Numpy arrays to avoid looping later
self._xarr = np.array(self._xarr)
self._yarr = np.array(self._yarr)
self._treepaths = np.array(self._treepaths)
# Display info table
self.recreate_toc()
# Draw on canvas
self.marktag = self.canvas.add(self.dc.CompoundObject(*objlist))
self.fitsimage.redraw() # Force immediate redraw
def _get_markobj(self, x, y, marktype, marksize, markcolor, markwidth):
"""Generate canvas object for given mark parameters."""
if marktype == 'circle':
obj = self.dc.Circle(
x=x, y=y, radius=marksize, color=markcolor, linewidth=markwidth)
elif marktype in ('cross', 'plus'):
obj = self.dc.Point(
x=x, y=y, radius=marksize, color=markcolor, linewidth=markwidth,
style=marktype)
elif marktype == 'box':
obj = self.dc.Box(
x=x, y=y, xradius=marksize, yradius=marksize, color=markcolor,
linewidth=markwidth)
else: # point, marksize
obj = self.dc.Box(
x=x, y=y, xradius=1, yradius=1, color=markcolor,
linewidth=markwidth, fill=True, fillcolor=markcolor)
return obj
def clear_marking(self):
"""Clear marking from image.
This does not clear loaded coordinates from memory."""
if self.marktag:
try:
self.canvas.delete_object_by_tag(self.marktag, redraw=False)
except:
pass
if self.markhltag:
try:
self.canvas.delete_object_by_tag(self.markhltag, redraw=False)
except:
pass
self.treeview.clear() # Clear table too
self.w.nshown.set_text('0')
self.fitsimage.redraw() # Force immediate redraw
def forget_coords(self):
"""Forget all loaded coordinates."""
self.w.ntotal.set_text('0')
self.coords_dict.clear()
self.redo()
# TODO: Support more formats?
    def load_file(self, filename):
        """Load coordinates file.

        Results are appended to previously loaded coordinates.
        This can be used to load one file per color.

        Silently returns if ``filename`` does not exist; table/column read
        errors are logged and abort the load.
        """
        if not os.path.isfile(filename):
            return
        self.logger.info('Loading coordinates from {0}'.format(filename))
        if filename.endswith('.fits'):
            fmt = 'fits'
        else:  # Assume ASCII
            fmt = 'ascii'
        try:
            tab = Table.read(filename, format=fmt)
        except Exception as e:
            self.logger.error('{0}: {1}'.format(e.__class__.__name__, str(e)))
            return
        # Column names are configurable via plugin settings; defaults depend
        # on whether we are in RA/DEC or X/Y mode.
        if self.use_radec:
            colname0 = self.settings.get('ra_colname', 'ra')
            colname1 = self.settings.get('dec_colname', 'dec')
        else:
            colname0 = self.settings.get('x_colname', 'x')
            colname1 = self.settings.get('y_colname', 'y')
        try:
            col_0 = tab[colname0]
            col_1 = tab[colname1]
        except Exception as e:
            self.logger.error('{0}: {1}'.format(e.__class__.__name__, str(e)))
            return
        nrows = len(col_0)
        # Placeholder for the coordinate pair not present in the file.
        dummy_col = [None] * nrows
        try:
            oldrows = int(self.w.ntotal.get_text())
        except ValueError:
            oldrows = 0
        # Running total across multiple load_file() calls.
        self.w.ntotal.set_text(str(oldrows + nrows))
        if self.use_radec:
            ra = self._convert_radec(col_0)
            dec = self._convert_radec(col_1)
            x = y = dummy_col
        else:
            ra = dec = dummy_col
            # X and Y always 0-indexed internally
            x = col_0.data - self.pixelstart
            y = col_1.data - self.pixelstart
        args = [ra, dec, x, y]
        # Load extra columns
        for colname in self.extra_columns:
            try:
                col = tab[colname].data
            except Exception as e:
                self.logger.error(
                    '{0}: {1}'.format(e.__class__.__name__, str(e)))
                col = dummy_col
            args.append(col)
        # Use list to preserve order. Does not handle duplicates.
        key = (self.marktype, self.marksize, self.markcolor)
        self.coords_dict[key] += list(zip(*args))
        self.redo()
def _convert_radec(self, val):
"""Convert RA or DEC table column to degrees and extract data.
Assume already in degrees if cannot convert.
"""
try:
ans = val.to('deg')
except Exception as e:
self.logger.error('Cannot convert, assume already in degrees')
ans = val.data
else:
ans = ans.value
return ans
# TODO: Support more extensions?
def load_coords_cb(self):
"""Activate file dialog to select coordinates file."""
self.cfilesel.popup('Load coordinates file', self.load_file,
initialdir='.',
filename='Table files (*.txt *.dat *.fits)')
    def set_coordtype_cb(self, w, val):
        """Toggle between RA/DEC or X/Y coordinates.

        ``val`` is the checkbutton state; it is read by ``load_file()``
        when selecting table columns.
        """
        self.use_radec = val
def recreate_toc(self):
self.logger.debug('Recreating table of contents...')
self.treeview.set_tree(self.tree_dict)
n = 0
for sub_dict in itervalues(self.tree_dict):
n += len(sub_dict)
self.w.nshown.set_text(str(n))
def hl_table2canvas(self, w, res_dict):
"""Highlight marking on canvas when user click on table."""
objlist = []
width = self.markwidth + self._dwidth
# Remove existing highlight
if self.markhltag:
try:
self.canvas.delete_object_by_tag(self.markhltag, redraw=False)
except:
pass
# Display highlighted entries only in second table
self.treeviewsel.set_tree(res_dict)
for kstr, sub_dict in iteritems(res_dict):
s = kstr.split(',')
marktype = s[0]
marksize = float(s[1])
markcolor = s[2]
for bnch in itervalues(sub_dict):
obj = self._get_markobj(bnch.X - self.pixelstart,
bnch.Y - self.pixelstart,
marktype, marksize, markcolor, width)
objlist.append(obj)
nsel = len(objlist)
self.w.nselected.set_text(str(nsel))
# Draw on canvas
if nsel > 0:
self.markhltag = self.canvas.add(self.dc.CompoundObject(*objlist))
self.fitsimage.redraw() # Force immediate redraw
def hl_canvas2table_box(self, canvas, tag):
"""Highlight all markings inside user drawn box on table."""
self.treeview.clear_selection()
# Remove existing box
cobj = canvas.get_object_by_tag(tag)
if cobj.kind != 'rectangle':
return
canvas.delete_object_by_tag(tag, redraw=False)
# Remove existing highlight
if self.markhltag:
try:
canvas.delete_object_by_tag(self.markhltag, redraw=True)
except:
pass
# Nothing to do if no markings are displayed
try:
obj = canvas.get_object_by_tag(self.marktag)
except:
return
if obj.kind != 'compound':
return
# Nothing to do if table has no data
if (len(self._xarr) == 0 or len(self._yarr) == 0 or
len(self._treepaths) == 0):
return
# Find markings inside box
mask = cobj.contains_arr(self._xarr, self._yarr)
for hlpath in self._treepaths[mask]:
self._highlight_path(hlpath)
def hl_canvas2table(self, canvas, button, data_x, data_y):
"""Highlight marking on table when user click on canvas."""
self.treeview.clear_selection()
# Remove existing highlight
if self.markhltag:
try:
canvas.delete_object_by_tag(self.markhltag, redraw=True)
except:
pass
# Nothing to do if no markings are displayed
try:
obj = canvas.get_object_by_tag(self.marktag)
except:
return
if obj.kind != 'compound':
return
# Nothing to do if table has no data
if (len(self._xarr) == 0 or len(self._yarr) == 0 or
len(self._treepaths) == 0):
return
sr = self.settings.get('searchradius', 10)
dx = data_x - self._xarr
dy = data_y - self._yarr
dr = np.sqrt(dx * dx + dy * dy)
mask = dr <= sr
for hlpath in self._treepaths[mask]:
self._highlight_path(hlpath)
    def _highlight_path(self, hlpath):
        """Highlight an entry in the table and associated marking.

        ``hlpath`` is a ``(key, seqno)`` tree path into the main table.
        """
        self.logger.debug('Highlighting {0}'.format(hlpath))
        self.treeview.select_path(hlpath)
        # TODO: Does not work in Qt. This is known issue in Ginga.
        self.treeview.scroll_to_path(hlpath)
def set_marktype_cb(self, w, index):
"""Set type of marking."""
self.marktype = self._mark_options[index]
# Mark size is not used for point
if self.marktype != 'point':
self.w.mark_size.set_enabled(True)
else:
self.w.mark_size.set_enabled(False)
    def set_markcolor_cb(self, w, index):
        """Set color of marking.

        ``index`` selects an entry from the color options list.
        """
        self.markcolor = self._color_options[index]
def set_marksize(self):
"""Set size/radius of marking."""
try:
sz = float(self.w.mark_size.get_text())
except ValueError:
self.logger.error('Cannot set mark size')
self.w.mark_size.set_text(str(self.marksize))
else:
self.marksize = sz
def set_markwidth(self):
"""Set width of marking."""
try:
sz = int(self.w.mark_width.get_text())
except ValueError:
self.logger.error('Cannot set mark width')
self.w.mark_width.set_text(str(self.markwidth))
else:
self.markwidth = sz
    def close(self):
        """Stop this local plugin instance in its channel."""
        self.fv.stop_local_plugin(self.chname, str(self))
        return True
    def start(self):
        """Start the plugin: show instructions, attach the drawing canvas,
        and resume interaction."""
        self.instructions()
        # insert canvas, if not already
        p_canvas = self.fitsimage.get_canvas()
        try:
            obj = p_canvas.get_object_by_tag(self.layertag)
        except KeyError:
            # Add drawing layer
            p_canvas.add(self.canvas, tag=self.layertag)
        self.resume()
    def pause(self):
        """Suspend canvas UI interaction for this plugin."""
        self.canvas.ui_setActive(False)
    def resume(self):
        """Re-enable canvas UI interaction and show a status hint."""
        # turn off any mode user may be in
        self.modes_off()
        self.canvas.ui_setActive(True)
        self.fv.show_status('See instructions')
def stop(self):
# remove canvas from image
p_canvas = self.fitsimage.get_canvas()
try:
p_canvas.delete_object_by_tag(self.layertag)
except:
pass
# Free some memory, maybe
self.tree_dict = Bunch.caselessDict()
self._xarr = []
self._yarr = []
self._treepaths = []
self.gui_up = False
self.fv.show_status('')
def __str__(self):
"""
This method should be provided and should return the lower case
name of the plugin.
"""
return 'tvmark'
# Replace module docstring with config doc for auto insert by Sphinx.
# In the future, if we need the real docstring, we can append instead of
# overwrite.
# NOTE(review): import is intentionally placed at the bottom so the
# generated text overrides the docstring only after the class is defined.
from ginga.util.toolbox import generate_cfg_example
__doc__ = generate_cfg_example('plugin_TVMark', package='ginga')
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Build channels names based on FRIB naming convention.
"""
from collections import OrderedDict
from phantasy.library.layout import BCMElement
from phantasy.library.layout import BLElement
from phantasy.library.layout import BLMElement
from phantasy.library.layout import BPMElement
from phantasy.library.layout import BendElement
from phantasy.library.layout import CavityElement
from phantasy.library.layout import CorElement
from phantasy.library.layout import DriftElement
from phantasy.library.layout import EBendElement
from phantasy.library.layout import EMSElement
from phantasy.library.layout import EQuadElement
from phantasy.library.layout import FCElement
from phantasy.library.layout import PMElement
from phantasy.library.layout import PortElement
from phantasy.library.layout import QuadElement
from phantasy.library.layout import SextElement
from phantasy.library.layout import SolCorElement
from phantasy.library.layout import SolElement
from phantasy.library.layout import StripElement
from phantasy.library.layout import VDElement
from phantasy.library.layout import ValveElement
# Keys of the per-channel property dictionaries built by build_channels().
_INDEX_PROPERTY = "elemIndex"
_POSITION_PROPERTY = "elemPosition"
_LENGTH_PROPERTY = "elemLength"
_MACHINE_PROPERTY = "machine"
_NAME_PROPERTY = "elemName"
_HANDLE_PROPERTY = "elemHandle"
_FIELD_PROPERTY = "elemField"
_TYPE_PROPERTY = "elemType"
_PHYTYPE_PROPERTY = "physicsType"
_PHYNAME_PROPERTY = "physicsName"
# Declared for completeness; not referenced in this module's visible code.
_MISC_PROPERTY = "misc"
def build_channels(layout, machine=None, **kws):
    """Build channels using FRIB naming convention from accelerator layout.

    Parameters
    ----------
    layout :
        Accelerator layout object
    machine : str
        Machine identifier and optional channel prefix.

    Keyword Arguments
    -----------------
    start: str
        Start element.
    end: str
        End element.

    Returns
    -------
    ret : list(tuple)
        List of tuples of (channel, properties, tags)

    See Also
    --------
    :class:`~phantasy.library.layout.Layout`
    :func:`~phantasy.library.misc.complicate_data`
    """
    if machine is None:
        machine = "LIVE"
        prefix = ""
    else:
        prefix = machine + ":"
    data = []
    index = 0
    offset = None
    _start = kws.get('start', None)
    _end = kws.get('end', None)
    for elem in layout.iter(_start, _end):
        index += 1
        # The upstream edge of the first element defines the position origin.
        if offset is None:
            offset = elem.z - (elem.length / 2.0)
        # Build the base channel string plus common properties/tags for
        # *element*; closes over the current index/offset/prefix values.
        def buildChannel(element):
            channel = "{}{elem.system}_{elem.subsystem}:{elem.device}_{elem.inst}".format(prefix, elem=element)
            props = OrderedDict()
            props[_INDEX_PROPERTY] = index
            props[_POSITION_PROPERTY] = str(element.z + (element.length / 2.0) - offset)
            props[_LENGTH_PROPERTY] = str(element.length)
            props[_MACHINE_PROPERTY] = machine
            props[_NAME_PROPERTY] = element.name
            # Handle/field/type are filled in per-channel by the caller below.
            props[_HANDLE_PROPERTY] = ""
            props[_FIELD_PROPERTY] = ""
            props[_TYPE_PROPERTY] = ""
            props[_PHYTYPE_PROPERTY] = element.dtype
            props[_PHYNAME_PROPERTY] = element.desc
            tags = []
            tags.append("phantasy.sys." + element.system)
            tags.append("phantasy.sub." + element.subsystem)
            return channel, props, tags
        channel, props, tags = buildChannel(elem)
        # For each supported element type, emit setpoint/readset/readback
        # channels per field. props/tags are copied on each append because
        # they are mutated between appends.
        if isinstance(elem, CavityElement):
            props[_TYPE_PROPERTY] = "CAV"
            props[_FIELD_PROPERTY] = elem.fields.phase
            props[_HANDLE_PROPERTY] = "setpoint"
            data.append((channel + ":PHA_CSET", OrderedDict(props), list(tags)))
            props[_HANDLE_PROPERTY] = "readset"
            data.append((channel + ":PHA_RSET", OrderedDict(props), list(tags)))
            props[_HANDLE_PROPERTY] = "readback"
            data.append((channel + ":PHA_RD", OrderedDict(props), list(tags)))
            props[_FIELD_PROPERTY] = elem.fields.amplitude
            props[_HANDLE_PROPERTY] = "setpoint"
            data.append((channel + ":AMPL_CSET", OrderedDict(props), list(tags)))
            props[_HANDLE_PROPERTY] = "readset"
            data.append((channel + ":AMPL_RSET", OrderedDict(props), list(tags)))
            props[_HANDLE_PROPERTY] = "readback"
            data.append((channel + ":AMPL_RD", OrderedDict(props), list(tags)))
        elif isinstance(elem, SolCorElement):
            props[_TYPE_PROPERTY] = "SOL"
            props[_FIELD_PROPERTY] = elem.fields.field
            props[_HANDLE_PROPERTY] = "setpoint"
            data.append((channel + ":B_CSET", OrderedDict(props), list(tags)))
            props[_HANDLE_PROPERTY] = "readset"
            data.append((channel + ":B_RSET", OrderedDict(props), list(tags)))
            props[_HANDLE_PROPERTY] = "readback"
            data.append((channel + ":B_RD", OrderedDict(props), list(tags)))
            # Embedded horizontal corrector
            channel, props, tags = buildChannel(elem.h)
            props[_TYPE_PROPERTY] = "HCOR"
            props[_FIELD_PROPERTY] = elem.h.fields.angle
            props[_HANDLE_PROPERTY] = "setpoint"
            data.append((channel + ":ANG_CSET", OrderedDict(props), list(tags)))
            props[_HANDLE_PROPERTY] = "readset"
            data.append((channel + ":ANG_RSET", OrderedDict(props), list(tags)))
            props[_HANDLE_PROPERTY] = "readback"
            data.append((channel + ":ANG_RD", OrderedDict(props), list(tags)))
            # Embedded vertical corrector
            channel, props, tags = buildChannel(elem.v)
            props[_TYPE_PROPERTY] = "VCOR"
            props[_FIELD_PROPERTY] = elem.v.fields.angle
            props[_HANDLE_PROPERTY] = "setpoint"
            data.append((channel + ":ANG_CSET", OrderedDict(props), list(tags)))
            props[_HANDLE_PROPERTY] = "readset"
            data.append((channel + ":ANG_RSET", OrderedDict(props), list(tags)))
            props[_HANDLE_PROPERTY] = "readback"
            data.append((channel + ":ANG_RD", OrderedDict(props), list(tags)))
        elif isinstance(elem, SolElement):
            props[_TYPE_PROPERTY] = "SOL"
            props[_FIELD_PROPERTY] = elem.fields.field
            props[_HANDLE_PROPERTY] = "setpoint"
            data.append((channel + ":B_CSET", OrderedDict(props), list(tags)))
            props[_HANDLE_PROPERTY] = "readset"
            data.append((channel + ":B_RSET", OrderedDict(props), list(tags)))
            props[_HANDLE_PROPERTY] = "readback"
            data.append((channel + ":B_RD", OrderedDict(props), list(tags)))
        elif isinstance(elem, CorElement):
            channel, props, tags = buildChannel(elem.h)
            props[_TYPE_PROPERTY] = "HCOR"
            props[_FIELD_PROPERTY] = elem.h.fields.angle
            props[_HANDLE_PROPERTY] = "setpoint"
            data.append((channel + ":ANG_CSET", OrderedDict(props), list(tags)))
            props[_HANDLE_PROPERTY] = "readset"
            data.append((channel + ":ANG_RSET", OrderedDict(props), list(tags)))
            props[_HANDLE_PROPERTY] = "readback"
            data.append((channel + ":ANG_RD", OrderedDict(props), list(tags)))
            channel, props, tags = buildChannel(elem.v)
            props[_TYPE_PROPERTY] = "VCOR"
            props[_FIELD_PROPERTY] = elem.v.fields.angle
            props[_HANDLE_PROPERTY] = "setpoint"
            data.append((channel + ":ANG_CSET", OrderedDict(props), list(tags)))
            props[_HANDLE_PROPERTY] = "readset"
            data.append((channel + ":ANG_RSET", OrderedDict(props), list(tags)))
            props[_HANDLE_PROPERTY] = "readback"
            data.append((channel + ":ANG_RD", OrderedDict(props), list(tags)))
        elif isinstance(elem, QuadElement):
            props[_TYPE_PROPERTY] = "QUAD"
            props[_FIELD_PROPERTY] = elem.fields.gradient
            props[_HANDLE_PROPERTY] = "setpoint"
            data.append((channel + ":GRAD_CSET", OrderedDict(props), list(tags)))
            props[_HANDLE_PROPERTY] = "readset"
            data.append((channel + ":GRAD_RSET", OrderedDict(props), list(tags)))
            props[_HANDLE_PROPERTY] = "readback"
            data.append((channel + ":GRAD_RD", OrderedDict(props), list(tags)))
        elif isinstance(elem, BendElement):
            props[_TYPE_PROPERTY] = "BEND"
            props[_FIELD_PROPERTY] = elem.fields.field
            props[_HANDLE_PROPERTY] = "setpoint"
            data.append((channel + ":B_CSET", OrderedDict(props), list(tags)))
            props[_HANDLE_PROPERTY] = "readset"
            data.append((channel + ":B_RSET", OrderedDict(props), list(tags)))
            props[_HANDLE_PROPERTY] = "readback"
            data.append((channel + ":B_RD", OrderedDict(props), list(tags)))
        elif isinstance(elem, SextElement):
            props[_TYPE_PROPERTY] = "SEXT"
            props[_FIELD_PROPERTY] = elem.fields.field
            props[_HANDLE_PROPERTY] = "setpoint"
            data.append((channel + ":B3_CSET", OrderedDict(props), list(tags)))
            props[_HANDLE_PROPERTY] = "readset"
            data.append((channel + ":B3_RSET", OrderedDict(props), list(tags)))
            props[_HANDLE_PROPERTY] = "readback"
            data.append((channel + ":B3_RD", OrderedDict(props), list(tags)))
        elif isinstance(elem, BPMElement):
            # Diagnostics expose readback channels only.
            props[_TYPE_PROPERTY] = "BPM"
            props[_FIELD_PROPERTY] = elem.fields.x
            props[_HANDLE_PROPERTY] = "readback"
            data.append((channel + ":X_RD", OrderedDict(props), list(tags)))
            props[_FIELD_PROPERTY] = elem.fields.y
            props[_HANDLE_PROPERTY] = "readback"
            data.append((channel + ":Y_RD", OrderedDict(props), list(tags)))
            props[_FIELD_PROPERTY] = elem.fields.phase
            props[_HANDLE_PROPERTY] = "readback"
            data.append((channel + ":PHA_RD", OrderedDict(props), list(tags)))
            props[_FIELD_PROPERTY] = elem.fields.energy
            props[_HANDLE_PROPERTY] = "readback"
            data.append((channel + ":ENG_RD", OrderedDict(props), list(tags)))
        elif isinstance(elem, PMElement):
            props[_TYPE_PROPERTY] = "PM"
            props[_FIELD_PROPERTY] = elem.fields.x
            props[_HANDLE_PROPERTY] = "readback"
            data.append((channel + ":X_RD", OrderedDict(props), list(tags)))
            props[_FIELD_PROPERTY] = elem.fields.y
            props[_HANDLE_PROPERTY] = "readback"
            data.append((channel + ":Y_RD", OrderedDict(props), list(tags)))
            props[_FIELD_PROPERTY] = elem.fields.xy
            props[_HANDLE_PROPERTY] = "readback"
            data.append((channel + ":XY_RD", OrderedDict(props), list(tags)))
            props[_FIELD_PROPERTY] = elem.fields.xrms
            props[_HANDLE_PROPERTY] = "readback"
            data.append((channel + ":XRMS_RD", OrderedDict(props), list(tags)))
            props[_FIELD_PROPERTY] = elem.fields.yrms
            props[_HANDLE_PROPERTY] = "readback"
            data.append((channel + ":YRMS_RD", OrderedDict(props), list(tags)))
            props[_FIELD_PROPERTY] = elem.fields.xyrms
            props[_HANDLE_PROPERTY] = "readback"
            data.append((channel + ":XYRMS_RD", OrderedDict(props), list(tags)))
        elif isinstance(elem, EBendElement):
            props[_TYPE_PROPERTY] = "EBEND"
            props[_FIELD_PROPERTY] = elem.fields.field
            props[_HANDLE_PROPERTY] = "setpoint"
            data.append((channel + ":V_CSET", OrderedDict(props), list(tags)))
            props[_HANDLE_PROPERTY] = "readset"
            data.append((channel + ":V_RSET", OrderedDict(props), list(tags)))
            props[_HANDLE_PROPERTY] = "readback"
            data.append((channel + ":V_RD", OrderedDict(props), list(tags)))
        elif isinstance(elem, EQuadElement):
            props[_TYPE_PROPERTY] = "EQUAD"
            props[_FIELD_PROPERTY] = elem.fields.gradient
            props[_HANDLE_PROPERTY] = "setpoint"
            data.append((channel + ":V_CSET", OrderedDict(props), list(tags)))
            props[_HANDLE_PROPERTY] = "readset"
            data.append((channel + ":V_RSET", OrderedDict(props), list(tags)))
            props[_HANDLE_PROPERTY] = "readback"
            data.append((channel + ":V_RD", OrderedDict(props), list(tags)))
        elif isinstance(elem, StripElement):
            # Charge Stripper has no channels
            pass
        elif isinstance(elem, (BLMElement, BLElement, BCMElement)):
            # Diagnostic elements do not have defined channels
            pass
        elif isinstance(elem, (DriftElement, ValveElement, PortElement)):
            # Passive elements do not have defined channels
            pass
        elif isinstance(elem, EMSElement):
            pass
        elif isinstance(elem, VDElement):
            pass
        elif isinstance(elem, FCElement):
            pass
        else:
            raise RuntimeError("read_layout: Element type '{}' not supported".format(elem.ETYPE))
    return data
| |
# Copyright (c) 2015 Stephen Warren
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
#
# SPDX-License-Identifier: GPL-2.0
# Common logic to interact with U-Boot via the console. This class provides
# the interface that tests use to execute U-Boot shell commands and wait for
# their results. Sub-classes exist to perform board-type-specific setup
# operations, such as spawning a sub-process for Sandbox, or attaching to the
# serial console of real hardware.
import multiplexed_log
import os
import pytest
import re
import sys
import u_boot_spawn
# Regexes for text we expect U-Boot to send to the console.
pattern_u_boot_spl_signon = re.compile('(U-Boot SPL \\d{4}\\.\\d{2}[^\r\n]*\\))')
pattern_u_boot_main_signon = re.compile('(U-Boot \\d{4}\\.\\d{2}[^\r\n]*\\))')
pattern_stop_autoboot_prompt = re.compile('Hit any key to stop autoboot: ')
pattern_unknown_command = re.compile('Unknown command \'.*\' - try \'help\'')
pattern_error_notification = re.compile('## Error: ')
pattern_error_please_reset = re.compile('### ERROR ### Please RESET the board ###')
# Indices into each bad_pattern_defs entry: symbolic ID and compiled regex.
PAT_ID = 0
PAT_RE = 1
# Console output that normally indicates a problem; individual checks can
# be temporarily disabled via ConsoleBase.disable_check().
bad_pattern_defs = (
    ('spl_signon', pattern_u_boot_spl_signon),
    ('main_signon', pattern_u_boot_main_signon),
    ('stop_autoboot_prompt', pattern_stop_autoboot_prompt),
    ('unknown_command', pattern_unknown_command),
    ('error_notification', pattern_error_notification),
    ('error_please_reset', pattern_error_please_reset),
)
class ConsoleDisableCheck(object):
    """Context manager (for Python's with statement) that temporarily disables
    the specified console output error check.

    Useful when a test deliberately triggers one of the error checks to
    verify that the error condition really is raised. Created internally by
    ConsoleBase::disable_check(); not intended for direct usage."""

    def __init__(self, console, check_type):
        self.console = console
        self.check_type = check_type

    def __enter__(self):
        # Counted rather than boolean so nested disables stack correctly.
        self.console.disable_check_count[self.check_type] += 1
        self.console.eval_bad_patterns()

    def __exit__(self, extype, value, traceback):
        self.console.disable_check_count[self.check_type] -= 1
        self.console.eval_bad_patterns()
class ConsoleBase(object):
    """The interface through which test functions interact with the U-Boot
    console. This primarily involves executing shell commands, capturing their
    results, and checking for common error conditions. Some common utilities
    are also provided too."""
    def __init__(self, log, config, max_fifo_fill):
        """Initialize a U-Boot console connection.

        Can only usefully be called by sub-classes.

        Args:
            log: A multiplexed_log.Logfile object, to which the U-Boot output
                will be logged.
            config: A configuration data structure, as built by conftest.py.
            max_fifo_fill: The maximum number of characters to send to U-Boot
                command-line before waiting for U-Boot to echo the characters
                back. For UART-based HW without HW flow control, this value
                should be set less than the UART RX FIFO size to avoid
                overflow, assuming that U-Boot can't keep up with full-rate
                traffic at the baud rate.

        Returns:
            Nothing.
        """
        self.log = log
        self.config = config
        self.max_fifo_fill = max_fifo_fill
        self.logstream = self.log.get_stream('console', sys.stdout)
        # Array slice removes leading/trailing quotes
        self.prompt = self.config.buildconfig['config_sys_prompt'][1:-1]
        self.prompt_escaped = re.escape(self.prompt)
        self.p = None
        self.disable_check_count = {pat[PAT_ID]: 0 for pat in bad_pattern_defs}
        self.eval_bad_patterns()
        self.at_prompt = False
        self.at_prompt_logevt = None
    def eval_bad_patterns(self):
        # Rebuild the active bad-pattern lists, skipping any check that is
        # currently disabled (see ConsoleDisableCheck).
        self.bad_patterns = [pat[PAT_RE] for pat in bad_pattern_defs \
            if self.disable_check_count[pat[PAT_ID]] == 0]
        self.bad_pattern_ids = [pat[PAT_ID] for pat in bad_pattern_defs \
            if self.disable_check_count[pat[PAT_ID]] == 0]
    def close(self):
        """Terminate the connection to the U-Boot console.

        This function is only useful once all interaction with U-Boot is
        complete. Once this function is called, data cannot be sent to or
        received from U-Boot.

        Args:
            None.

        Returns:
            Nothing.
        """
        if self.p:
            self.p.close()
        self.logstream.close()
    def run_command(self, cmd, wait_for_echo=True, send_nl=True,
            wait_for_prompt=True):
        """Execute a command via the U-Boot console.

        The command is always sent to U-Boot.

        U-Boot echoes any command back to its output, and this function
        typically waits for that to occur. The wait can be disabled by setting
        wait_for_echo=False, which is useful e.g. when sending CTRL-C to
        interrupt a long-running command such as "ums".

        Command execution is typically triggered by sending a newline
        character. This can be disabled by setting send_nl=False, which is
        also useful when sending CTRL-C.

        This function typically waits for the command to finish executing, and
        returns the console output that it generated. This can be disabled by
        setting wait_for_prompt=False, which is useful when invoking a long-
        running command such as "ums".

        Args:
            cmd: The command to send.
            wait_for_echo: Boolean indicating whether to wait for U-Boot to
                echo the command text back to its output.
            send_nl: Boolean indicating whether to send a newline character
                after the command string.
            wait_for_prompt: Boolean indicating whether to wait for the
                command prompt to be sent by U-Boot. This typically occurs
                immediately after the command has been executed.

        Returns:
            If wait_for_prompt == False:
                Nothing.
            Else:
                The output from U-Boot during command execution. In other
                words, the text U-Boot emitted between the point it echoed the
                command string and emitted the subsequent command prompts.
        """
        if self.at_prompt and \
                self.at_prompt_logevt != self.logstream.logfile.cur_evt:
            self.logstream.write(self.prompt, implicit=True)
        try:
            self.at_prompt = False
            if send_nl:
                cmd += '\n'
            while cmd:
                # Limit max outstanding data, so UART FIFOs don't overflow
                chunk = cmd[:self.max_fifo_fill]
                cmd = cmd[self.max_fifo_fill:]
                self.p.send(chunk)
                if not wait_for_echo:
                    continue
                chunk = re.escape(chunk)
                chunk = chunk.replace('\\\n', '[\r\n]')
                m = self.p.expect([chunk] + self.bad_patterns)
                if m != 0:
                    self.at_prompt = False
                    raise Exception('Bad pattern found on console: ' +
                                    self.bad_pattern_ids[m - 1])
            if not wait_for_prompt:
                return
            m = self.p.expect([self.prompt_escaped] + self.bad_patterns)
            if m != 0:
                self.at_prompt = False
                raise Exception('Bad pattern found on console: ' +
                                self.bad_pattern_ids[m - 1])
            self.at_prompt = True
            self.at_prompt_logevt = self.logstream.logfile.cur_evt
            # Only strip \r\n; space/TAB might be significant if testing
            # indentation.
            return self.p.before.strip('\r\n')
        except Exception as ex:
            self.log.error(str(ex))
            self.cleanup_spawn()
            raise
    def ctrlc(self):
        """Send a CTRL-C character to U-Boot.

        This is useful in order to stop execution of long-running synchronous
        commands such as "ums".

        Args:
            None.

        Returns:
            Nothing.
        """
        self.log.action('Sending Ctrl-C')
        self.run_command(chr(3), wait_for_echo=False, send_nl=False)
    def wait_for(self, text):
        """Wait for a pattern to be emitted by U-Boot.

        This is useful when a long-running command such as "dfu" is executing,
        and it periodically emits some text that should show up at a specific
        location in the log file.

        Args:
            text: The text to wait for; either a string (containing raw text,
                not a regular expression) or an re object.

        Returns:
            Nothing.
        """
        if type(text) == type(''):
            text = re.escape(text)
        m = self.p.expect([text] + self.bad_patterns)
        if m != 0:
            raise Exception('Bad pattern found on console: ' +
                            self.bad_pattern_ids[m - 1])
    def drain_console(self):
        """Read from and log the U-Boot console for a short time.

        U-Boot's console output is only logged when the test code actively
        waits for U-Boot to emit specific data. There are cases where tests
        can fail without doing this. For example, if a test asks U-Boot to
        enable USB device mode, then polls until a host-side device node
        exists. In such a case, it is useful to log U-Boot's console output
        in case U-Boot printed clues as to why the host-side event did not
        occur. This function will do that.

        Args:
            None.

        Returns:
            Nothing.
        """
        # If we are already not connected to U-Boot, there's nothing to drain.
        # This should only happen when a previous call to run_command() or
        # wait_for() failed (and hence the output has already been logged), or
        # the system is shutting down.
        if not self.p:
            return
        orig_timeout = self.p.timeout
        try:
            # Drain the log for a relatively short time.
            self.p.timeout = 1000
            # Wait for something U-Boot will likely never send. This will
            # cause the console output to be read and logged.
            self.p.expect(['This should never match U-Boot output'])
        except u_boot_spawn.Timeout:
            pass
        finally:
            self.p.timeout = orig_timeout
    def ensure_spawned(self):
        """Ensure a connection to a correctly running U-Boot instance.

        This may require spawning a new Sandbox process or resetting target
        hardware, as defined by the implementation sub-class.

        This is an internal function and should not be called directly.

        Args:
            None.

        Returns:
            Nothing.
        """
        if self.p:
            return
        try:
            self.at_prompt = False
            self.log.action('Starting U-Boot')
            self.p = self.get_spawn()
            # Real targets can take a long time to scroll large amounts of
            # text if LCD is enabled. This value may need tweaking in the
            # future, possibly per-test to be optimal. This works for 'help'
            # on board 'seaboard'.
            if not self.config.gdbserver:
                self.p.timeout = 30000
            self.p.logfile_read = self.logstream
            if self.config.buildconfig.get('CONFIG_SPL', False) == 'y':
                m = self.p.expect([pattern_u_boot_spl_signon] + self.bad_patterns)
                if m != 0:
                    raise Exception('Bad pattern found on console: ' +
                                    self.bad_pattern_ids[m - 1])
            m = self.p.expect([pattern_u_boot_main_signon] + self.bad_patterns)
            if m != 0:
                raise Exception('Bad pattern found on console: ' +
                                self.bad_pattern_ids[m - 1])
            self.u_boot_version_string = self.p.after
            while True:
                m = self.p.expect([self.prompt_escaped,
                    pattern_stop_autoboot_prompt] + self.bad_patterns)
                if m == 0:
                    break
                if m == 1:
                    self.p.send(chr(3)) # CTRL-C
                    continue
                # Offset is 2 here because two "good" patterns precede the
                # bad patterns in the expect list above.
                raise Exception('Bad pattern found on console: ' +
                                self.bad_pattern_ids[m - 2])
            self.at_prompt = True
            self.at_prompt_logevt = self.logstream.logfile.cur_evt
        except Exception as ex:
            self.log.error(str(ex))
            self.cleanup_spawn()
            raise
    def cleanup_spawn(self):
        """Shut down all interaction with the U-Boot instance.

        This is used when an error is detected prior to re-establishing a
        connection with a fresh U-Boot instance.

        This is an internal function and should not be called directly.

        Args:
            None.

        Returns:
            Nothing.
        """
        try:
            if self.p:
                self.p.close()
        except:
            pass
        self.p = None
    def validate_version_string_in_text(self, text):
        """Assert that a command's output includes the U-Boot signon message.

        This is primarily useful for validating the "version" command without
        duplicating the signon text regex in a test function.

        Args:
            text: The command output text to check.

        Returns:
            Nothing. An exception is raised if the validation fails.
        """
        assert(self.u_boot_version_string in text)
    def disable_check(self, check_type):
        """Temporarily disable an error check of U-Boot's output.

        Create a new context manager (for use with the "with" statement) which
        temporarily disables a particular console output error check.

        Args:
            check_type: The type of error-check to disable. Valid values may
                be found in self.disable_check_count above.

        Returns:
            A context manager object.
        """
        return ConsoleDisableCheck(self, check_type)
# ---- (removed stray "| |" concatenation artifact; unrelated django_bitcoin module follows) ----
from __future__ import with_statement
import datetime
import random
import hashlib
import base64
import pytz
from decimal import Decimal
from django.db import models
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django_bitcoin.utils import *
from django_bitcoin.utils import bitcoind
from django_bitcoin import settings
from django.utils.translation import ugettext as _
import django.dispatch
import jsonrpc
from BCAddressField import is_valid_btc_address
from django.db import transaction as db_transaction
from celery import task
from distributedlock import distributedlock, MemcachedLock, LockNotAcquiredError
from django.db.models import Avg, Max, Min, Sum
def CacheLock(key, lock=None, blocking=True, timeout=10000):
    """Return a blocking distributed lock for *key*, backed by memcached.

    If *lock* is not supplied, a MemcachedLock bound to the shared cache
    client is created with the given timeout (milliseconds).
    """
    chosen_lock = MemcachedLock(key=key, client=cache, timeout=timeout) if lock is None else lock
    return distributedlock(key, chosen_lock, blocking)
def NonBlockingCacheLock(key, lock=None, blocking=False, timeout=10000):
    """Return a non-blocking distributed lock for *key*, backed by memcached.

    Identical to CacheLock except that acquisition does not block by default
    (raises LockNotAcquiredError instead of waiting).
    """
    chosen_lock = MemcachedLock(key=key, client=cache, timeout=timeout) if lock is None else lock
    return distributedlock(key, chosen_lock, blocking)
# Fired when a wallet/address balance changes at zero confirmations
# (unconfirmed), and again once the change is confirmed.
balance_changed = django.dispatch.Signal(providing_args=["changed", "transaction", "bitcoinaddress"])
balance_changed_confirmed = django.dispatch.Signal(providing_args=["changed", "transaction", "bitcoinaddress"])

# Currency choices (id, ISO-style code) for model fields.
currencies = (
    (1, "USD"),
    (2, "EUR"),
    (3, "BTC")
)

# XXX There *is* a risk when dealing with less then 6 confirmations. Check:
# http://eprint.iacr.org/2012/248.pdf
# http://blockchain.info/double-spends
# for an informed decision.
confirmation_choices = (
    (0, "0, (quick, recommended)"),
    (1, "1, (safer, slower for the buyer)"),
    (5, "5, (for the paranoid, not recommended)")
)
class Transaction(models.Model):
    """A plain (amount, address) withdrawal record.

    Rows are created via Payment.add_transaction() and linked to Payment
    through its `transactions` many-to-many field.
    """
    # Callable default: timestamp evaluated per-insert, not at import time.
    created_at = models.DateTimeField(default=datetime.datetime.now)
    # BTC amount with satoshi (8 decimal place) precision.
    amount = models.DecimalField(
        max_digits=16,
        decimal_places=8,
        default=Decimal("0.0"))
    # Destination bitcoin address.
    address = models.CharField(max_length=50)
class DepositTransaction(models.Model):
    """An incoming deposit observed on a BitcoinAddress, credited to a Wallet."""
    created_at = models.DateTimeField(default=datetime.datetime.now)
    address = models.ForeignKey('BitcoinAddress')
    amount = models.DecimalField(max_digits=16, decimal_places=8, default=Decimal(0))
    description = models.CharField(max_length=100, blank=True, null=True, default=None)
    wallet = models.ForeignKey("Wallet")
    under_execution = models.BooleanField(default=False) # execution fail
    # Filled in once the deposit has been booked as a WalletTransaction.
    transaction = models.ForeignKey('WalletTransaction', null=True, default=None)
    # Number of confirmations seen when this row was recorded.
    confirmations = models.IntegerField(default=0)
    txid = models.CharField(max_length=100, blank=True, null=True)

    def __unicode__(self):
        return self.address.address + u", " + unicode(self.amount)
# class BitcoinBlock(models.Model):
# created_at = models.DateTimeField(default=datetime.datetime.now)
# blockhash = models.CharField(max_length=100)
# blockheight = models.IntegerField()
# confirmations = models.IntegerField(default=0)
# parent = models.ForeignKey('BitcoinBlock')
class OutgoingTransaction(models.Model):
    """A queued outgoing bitcoin send, batched by process_outgoing_transactions().

    executed_at is NULL while the row is pending; it is set (optimistically,
    via select_for_update) just before the coins are broadcast.
    """
    created_at = models.DateTimeField(default=datetime.datetime.now)
    # Deadline after which the batcher must flush this row.
    expires_at = models.DateTimeField(default=datetime.datetime.now)
    executed_at = models.DateTimeField(null=True,default=None)
    under_execution = models.BooleanField(default=False) # execution fail
    to_bitcoinaddress = models.CharField(
        max_length=50,
        blank=True)
    amount = models.DecimalField(
        max_digits=16,
        decimal_places=8,
        default=Decimal("0.0"))
    # description = models.CharField(max_length=100, blank=True)
    # Bitcoin network transaction id once broadcast (or an error string on
    # failure -- see process_outgoing_transactions()).
    txid = models.CharField(max_length=100, blank=True, null=True, default=None)

    def __unicode__(self):
        return unicode(self.created_at) + ": " + self.to_bitcoinaddress + u", " + unicode(self.amount)
@task()
def update_wallet_balance(wallet_id):
    """Celery task: recompute one wallet's balance via SQL and cache it.

    Uses a queryset update() so the write is a single UPDATE statement and
    does not touch updated_at or trigger model save() logic.
    """
    wallet = Wallet.objects.get(id=wallet_id)
    fresh_balance = wallet.total_balance_sql()
    Wallet.objects.filter(id=wallet_id).update(last_balance=fresh_balance)
from time import sleep
# @task()
# @db_transaction.commit_manually
# def process_outgoing_transactions():
# if cache.get("process_outgoing_transactions"):
# print "process ongoing, skipping..."
# db_transaction.rollback()
# return
# if cache.get("wallet_downtime_utc"):
# db_transaction.rollback()
# return
# # try out bitcoind connection
# print bitcoind.bitcoind_api.getinfo()
# with NonBlockingCacheLock('process_outgoing_transactions'):
# update_wallets = []
# for ot in OutgoingTransaction.objects.filter(executed_at=None)[:3]:
# result = None
# updated = OutgoingTransaction.objects.filter(id=ot.id,
# executed_at=None, txid=None, under_execution=False).select_for_update().update(executed_at=datetime.datetime.now(), txid=result)
# db_transaction.commit()
# if updated:
# try:
# result = bitcoind.send(ot.to_bitcoinaddress, ot.amount)
# updated2 = OutgoingTransaction.objects.filter(id=ot.id, txid=None).select_for_update().update(txid=result)
# db_transaction.commit()
# if updated2:
# transaction = bitcoind.gettransaction(result)
# if Decimal(transaction['fee']) < Decimal(0):
# wt = ot.wallettransaction_set.all()[0]
# fee_transaction = WalletTransaction.objects.create(
# amount=Decimal(transaction['fee']) * Decimal(-1),
# from_wallet_id=wt.from_wallet_id)
# update_wallets.append(wt.from_wallet_id)
# except jsonrpc.JSONRPCException as e:
# if e.error == u"{u'message': u'Insufficient funds', u'code': -4}":
# OutgoingTransaction.objects.filter(id=ot.id, txid=None,
# under_execution=False).select_for_update().update(executed_at=None)
# db_transaction.commit()
# # sleep(10)
# raise
# else:
# OutgoingTransaction.objects.filter(id=ot.id).select_for_update().update(under_execution=True)
# db_transaction.commit()
# raise
# else:
# raise Exception("Outgoingtransaction can't be updated!")
# db_transaction.commit()
# for wid in update_wallets:
# update_wallet_balance.delay(wid)
# TODO: Group outgoing transactions to save on tx fees
def fee_wallet():
    """Return the singleton wallet used to accumulate transaction fees.

    The wallet id is memoized in the cache to avoid a database lookup on
    every call; the wallet row itself is created on first use.
    """
    master_wallet_id = cache.get("django_bitcoin_fee_wallet_id")
    if master_wallet_id:
        return Wallet.objects.get(id=master_wallet_id)
    try:
        mw = Wallet.objects.get(label="django_bitcoin_fee_wallet")
    except Wallet.DoesNotExist:
        # objects.create() already INSERTs the row; the extra mw.save()
        # the original code did here was a redundant second write.
        mw = Wallet.objects.create(label="django_bitcoin_fee_wallet")
    cache.set("django_bitcoin_fee_wallet_id", mw.id)
    return mw
def filter_doubles(outgoing_list):
    """Return the ids of transactions with unique destination addresses.

    Keeps the first transaction seen per to_bitcoinaddress, preserving input
    order. Needed because bitcoind's sendmany takes a dict keyed by address
    and therefore cannot pay the same address twice in one call.

    Args:
        outgoing_list: iterable of objects with `id` and `to_bitcoinaddress`.

    Returns:
        List of ids, one per distinct destination address, in input order.
    """
    ot_ids = []
    # Set membership is O(1); the original list-based check was O(n) per item.
    seen_addresses = set()
    for ot in outgoing_list:
        if ot.to_bitcoinaddress not in seen_addresses:
            ot_ids.append(ot.id)
            seen_addresses.add(ot.to_bitcoinaddress)
    return ot_ids
@task()
@db_transaction.autocommit
def process_outgoing_transactions():
    """Celery task: batch pending OutgoingTransactions into one sendmany call.

    Runs only when a pending transaction has expired or more than six are
    queued. Rows are optimistically marked executed before broadcasting;
    on "insufficient funds" the mark is rolled back for a later retry, on
    any other RPC error the rows are frozen with the error recorded in txid.
    Negative network fees are booked against the senders via the fee wallet.
    """
    if OutgoingTransaction.objects.filter(executed_at=None, expires_at__lte=datetime.datetime.now()).count()>0 or \
        OutgoingTransaction.objects.filter(executed_at=None).count()>6:
        # Doubles as a bitcoind liveness probe; the value itself is unused.
        blockcount = bitcoind.bitcoind_api.getblockcount()
        with NonBlockingCacheLock('process_outgoing_transactions'):
            # Up to 15 oldest-expiring rows, one per destination address
            # (sendmany's argument is a dict keyed by address).
            ots_ids = filter_doubles(OutgoingTransaction.objects.filter(executed_at=None).order_by("expires_at")[:15])
            ots = OutgoingTransaction.objects.filter(executed_at=None, id__in=ots_ids)
            update_wallets = []
            transaction_hash = {}
            for ot in ots:
                transaction_hash[ot.to_bitcoinaddress] = float(ot.amount)
            updated = OutgoingTransaction.objects.filter(id__in=ots_ids,
                executed_at=None).select_for_update().update(executed_at=datetime.datetime.now())
            if updated == len(ots):
                try:
                    result = bitcoind.sendmany(transaction_hash)
                except jsonrpc.JSONRPCException as e:
                    if e.error == u"{u'message': u'Insufficient funds', u'code': -4}" or \
                        e.error == u"{u'message': u'Insufficient funds', u'code': -6}":
                        # Not enough funds: un-mark the rows so a later run retries.
                        u2 = OutgoingTransaction.objects.filter(id__in=ots_ids, under_execution=False
                            ).select_for_update().update(executed_at=None)
                    else:
                        # Unknown failure: freeze the rows and record the error.
                        u2 = OutgoingTransaction.objects.filter(id__in=ots_ids, under_execution=False
                            ).select_for_update().update(under_execution=True, txid=e.error)
                    raise
                OutgoingTransaction.objects.filter(id__in=ots_ids).update(txid=result)
                transaction = bitcoind.gettransaction(result)
                if Decimal(transaction['fee']) < Decimal(0):
                    fw = fee_wallet()
                    fee_amount = Decimal(transaction['fee']) * Decimal(-1)
                    orig_fee_transaction = WalletTransaction.objects.create(
                        amount=fee_amount,
                        from_wallet=fw,
                        to_wallet=None)
                    i = 1
                    # NOTE(review): dividing by the running index i books
                    # fee/1, fee/2, fee/3, ... per sender, which totals MORE
                    # than fee_amount whenever len(ots_ids) > 1 -- confirm
                    # whether len(ots_ids) was the intended divisor.
                    for ot_id in ots_ids:
                        wt = WalletTransaction.objects.get(outgoing_transaction__id=ot_id)
                        update_wallets.append(wt.from_wallet_id)
                        fee_transaction = WalletTransaction.objects.create(
                            amount=(fee_amount / Decimal(i)).quantize(Decimal("0.00000001")),
                            from_wallet_id=wt.from_wallet_id,
                            to_wallet=fw,
                            description="fee")
                        i += 1
            else:
                # BUG FIX: corrected typo in the error message ("matchinf").
                raise Exception("Updated amount not matching transaction amount!")
        for wid in update_wallets:
            update_wallet_balance.delay(wid)
# elif OutgoingTransaction.objects.filter(executed_at=None).count()>0:
# next_run_at = OutgoingTransaction.objects.filter(executed_at=None).aggregate(Min('expires_at'))['expires_at__min']
# if next_run_at:
# process_outgoing_transactions.retry(
# countdown=max(((next_run_at - datetime.datetime.now(pytz.utc)) + datetime.timedelta(seconds=5)).total_seconds(), 5))
class BitcoinAddress(models.Model):
    """A receiving bitcoin address, optionally attached to a Wallet.

    least_received / least_received_confirmed are monotonically increasing
    lower bounds on the total amount ever received at this address
    (at 0 confirmations vs. at BITCOIN_MINIMUM_CONFIRMATIONS); both are
    advanced with optimistic compare-and-set queryset updates.
    """
    address = models.CharField(max_length=50, unique=True)
    created_at = models.DateTimeField(default=datetime.datetime.now)
    # True once the address has been handed out (see new_bitcoin_address()).
    active = models.BooleanField(default=False)
    least_received = models.DecimalField(max_digits=16, decimal_places=8, default=Decimal(0))
    least_received_confirmed = models.DecimalField(max_digits=16, decimal_places=8, default=Decimal(0))
    label = models.CharField(max_length=50, blank=True, null=True, default=None)
    wallet = models.ForeignKey("Wallet", null=True, related_name="addresses")
    # When True, deposits are also booked as WalletTransaction rows.
    migrated_to_transactions = models.BooleanField(default=True)

    class Meta:
        verbose_name_plural = 'Bitcoin addresses'

    def query_bitcoind(self, minconf=settings.BITCOIN_MINIMUM_CONFIRMATIONS, triggered_tx=None):
        """Deprecated: raises immediately; everything below is unreachable."""
        raise Exception("Deprecated")
        # NOTE(review): dead code below (kept verbatim). It also references
        # an undefined name `deposit_tx` in the WalletTransaction.create call.
        with CacheLock('query_bitcoind'):
            r = bitcoind.total_received(self.address, minconf=minconf)
            if r > self.least_received_confirmed and \
                minconf >= settings.BITCOIN_MINIMUM_CONFIRMATIONS:
                transaction_amount = r - self.least_received_confirmed
                if settings.BITCOIN_TRANSACTION_SIGNALING:
                    if self.wallet:
                        balance_changed_confirmed.send(sender=self.wallet,
                            changed=(transaction_amount), bitcoinaddress=self)
                updated = BitcoinAddress.objects.select_for_update().filter(id=self.id, least_received_confirmed=self.least_received_confirmed).update(least_received_confirmed=r)
                if self.least_received < r:
                    BitcoinAddress.objects.select_for_update().filter(id=self.id,
                        least_received=self.least_received).update(least_received=r)
                if self.wallet and updated:
                    dps = DepositTransaction.objects.filter(address=self, transaction=None,
                        amount__lte=transaction_amount, wallet=self.wallet).order_by("-amount", "-id")
                    total_confirmed_amount = Decimal(0)
                    confirmed_dps = []
                    for dp in dps:
                        if dp.amount <= transaction_amount - total_confirmed_amount:
                            DepositTransaction.objects.filter(id=dp.id).update(confirmations=minconf)
                            total_confirmed_amount += dp.amount
                            confirmed_dps.append(dp.id)
                    if total_confirmed_amount < transaction_amount:
                        dp = DepositTransaction.objects.create(address=self, amount=transaction_amount - total_confirmed_amount, wallet=self.wallet,
                            confirmations=minconf, txid=triggered_tx)
                        confirmed_dps.append(dp.id)
                    if self.migrated_to_transactions and updated:
                        wt = WalletTransaction.objects.create(to_wallet=self.wallet, amount=transaction_amount, description=self.address,
                            deposit_address=self, deposit_transaction=deposit_tx)
                        DepositTransaction.objects.select_for_update().filter(address=self, wallet=self.wallet,
                            id__in=confirmed_dps, transaction=None).update(transaction=wt)
                        update_wallet_balance.delay(self.wallet.id)
            elif r > self.least_received:
                transaction_amount = r - self.least_received
                if settings.BITCOIN_TRANSACTION_SIGNALING:
                    if self.wallet:
                        balance_changed.send(sender=self.wallet, changed=(transaction_amount), bitcoinaddress=self)
                # self.least_received = r
                # self.save()
                updated = BitcoinAddress.objects.select_for_update().filter(id=self.id, least_received=self.least_received).update(least_received=r)
                if self.wallet and minconf==0 and updated:
                    DepositTransaction.objects.create(address=self, amount=transaction_amount, wallet=self.wallet,
                        confirmations=0, txid=triggered_tx)
        return r

    def query_bitcoin_deposit(self, deposit_tx):
        """Confirm a specific DepositTransaction against bitcoind.

        Advances least_received_confirmed by the deposit amount (CAS-guarded)
        and books a WalletTransaction for migrated addresses.
        """
        if deposit_tx.transaction:
            print "Already has a transaction!"
            return
        with CacheLock('query_bitcoind'):
            r = bitcoind.total_received(self.address, minconf=settings.BITCOIN_MINIMUM_CONFIRMATIONS)
            received_amount = r - self.least_received_confirmed
            if received_amount >= deposit_tx.amount and not deposit_tx.under_execution:
                if settings.BITCOIN_TRANSACTION_SIGNALING:
                    if self.wallet:
                        balance_changed_confirmed.send(sender=self.wallet,
                            changed=(deposit_tx.amount), bitcoinaddress=self)
                # Compare-and-set: only advances if no one else moved it first.
                updated = BitcoinAddress.objects.select_for_update().filter(id=self.id,
                    least_received_confirmed=self.least_received_confirmed).update(
                    least_received_confirmed=self.least_received_confirmed + deposit_tx.amount)
                if self.wallet and updated:
                    DepositTransaction.objects.select_for_update().filter(id=deposit_tx.id).update(under_execution=True)
                    deposit_tx.under_execution = True
                    self.least_received_confirmed = self.least_received_confirmed + deposit_tx.amount
                    # Keep the unconfirmed lower bound >= the confirmed one.
                    if self.least_received < self.least_received_confirmed:
                        updated = BitcoinAddress.objects.select_for_update().filter(id=self.id).update(
                            least_received=self.least_received_confirmed)
                    if self.migrated_to_transactions:
                        wt = WalletTransaction.objects.create(to_wallet=self.wallet, amount=deposit_tx.amount, description=self.address,
                            deposit_address=self)
                        deposit_tx.transaction = wt
                        DepositTransaction.objects.select_for_update().filter(id=deposit_tx.id).update(transaction=wt)
                        self.wallet.update_last_balance(deposit_tx.amount)
                else:
                    print "transaction not updated!"
            else:
                print "This path should not occur, but whatever."
                # raise Exception("Should be never this way")
        return r

    def query_unconfirmed_deposits(self):
        """Refresh least_received from bitcoind at zero confirmations."""
        r = bitcoind.total_received(self.address, minconf=0)
        if r > self.least_received:
            transaction_amount = r - self.least_received
            if settings.BITCOIN_TRANSACTION_SIGNALING:
                if self.wallet:
                    balance_changed.send(sender=self.wallet, changed=(transaction_amount), bitcoinaddress=self)
            updated = BitcoinAddress.objects.select_for_update().filter(id=self.id, least_received=self.least_received).update(least_received=r)
            if updated:
                self.least_received = r

    def received(self, minconf=settings.BITCOIN_MINIMUM_CONFIRMATIONS):
        """Return the tracked received total for the given confirmation level.

        NOTE: the default is evaluated at import time. With signaling off
        this falls through to query_bitcoind(), which always raises
        (deprecated) -- confirm callers only use the signaling path.
        """
        if settings.BITCOIN_TRANSACTION_SIGNALING:
            if minconf >= settings.BITCOIN_MINIMUM_CONFIRMATIONS:
                return self.least_received_confirmed
            else:
                return self.least_received
        return self.query_bitcoind(minconf)

    def __unicode__(self):
        if self.label:
            return u'%s (%s)' % (self.label, self.address)
        return self.address
def new_bitcoin_address():
    """Claim and return a fresh, unattached BitcoinAddress from the pool.

    Loops until an inactive, wallet-less, never-funded address is atomically
    marked active (CAS via select_for_update); refills the pool when empty.
    Q is imported later in this module (module level), so it is resolved by
    the time this runs.
    """
    while True:
        with db_transaction.autocommit():
            db_transaction.enter_transaction_management()
            # Start from a fresh snapshot of the table.
            db_transaction.commit()
            bp = BitcoinAddress.objects.filter(Q(active=False) & Q(wallet__isnull=True) & \
                Q(least_received__lte=0))
            if len(bp) < 1:
                refill_payment_queue()
                db_transaction.commit()
                print "refilling queue...", bp
            else:
                bp = bp[0]
                # Claim the address only if nobody else did in the meantime.
                updated = BitcoinAddress.objects.select_for_update().filter(Q(id=bp.id) & Q(active=False) & Q(wallet__isnull=True) & \
                    Q(least_received__lte=0)).update(active=True)
                db_transaction.commit()
                if updated:
                    return bp
                else:
                    # Lost the race; log and retry with another candidate.
                    print "wallet transaction concurrency:", bp.address
class Payment(models.Model):
description = models.CharField(
max_length=255,
blank=True)
address = models.CharField(
max_length=50)
amount = models.DecimalField(
max_digits=16,
decimal_places=8,
default=Decimal("0.0"))
amount_paid = models.DecimalField(
max_digits=16,
decimal_places=8,
default=Decimal("0.0"))
active = models.BooleanField(default=False)
created_at = models.DateTimeField(default=datetime.datetime.now)
updated_at = models.DateTimeField()
paid_at = models.DateTimeField(null=True, default=None)
withdrawn_total = models.DecimalField(
max_digits=16,
decimal_places=8,
default=Decimal("0.0"))
transactions = models.ManyToManyField(Transaction)
def calculate_amount(self, proportion):
return quantitize_bitcoin(
Decimal((proportion/Decimal("100.0"))*self.amount))
def add_transaction(self, amount, address):
self.withdrawn_total += amount
bctrans = self.transactions.create(
amount=amount,
address=address)
self.save()
return bctrans
def withdraw_proportion(self, address, proportion):
if proportion<=Decimal("0") or proportion>Decimal("100"):
raise Exception("Illegal proportion.")
amount = self.calculate_amount(proportion)
if self.amount-self.withdrawn_total > amount:
raise Exception("Trying to withdraw too much.")
self.add_transaction(amount, address)
bitcoind.send(address, amount)
@classmethod
def withdraw_proportion_all(cls, address, bitcoin_payments_proportions):
"""hash BitcoinPayment -> Proportion"""
final_amount=Decimal("0.0")
print bitcoin_payments_proportions
for bp, proportion in bitcoin_payments_proportions.iteritems():
am=bp.calculate_amount(proportion)
final_amount+=am
bp.add_transaction(am, address)
bitcoind.send(address, final_amount)
return True
def withdraw_amounts(self, addresses_shares):
"""hash address -> percentage (string -> Decimal)"""
if self.amount_paid<self.amount:
raise Exception("Not paid.")
if self.withdrawn_at:
raise Exception("Trying to withdraw again.")
if sum(addresses_shares.values())>100:
raise Exception("Sum of proportions must be <=100.")
#self.withdraw_addresses=",".join(addresses)
#self.withdraw_proportions=",".join([str(x) for x in proportions])
amounts=[]
for p in addresses_shares.values():
if p<=0:
raise Exception()
am=quantitize_bitcoin(Decimal((p/Decimal("100.0"))*self.amount))
amounts.append(am)
#self.withdraw_proportions=",".join([str(x) for x in ])
if sum(amounts)>self.amount:
raise Exception("Sum of calculated amounts exceeds funds.")
return amounts
@classmethod
def calculate_amounts(cls, bitcoinpayments, addresses_shares):
amounts_all=[Decimal("0.0") for _i in addresses_shares]
for amount, payment in zip(amounts_all, bitcoinpayments):
withdrawn=payment.withdraw_amounts(addresses_shares)
amounts_all=[(w+total) for w, total in zip(withdrawn, amounts_all)]
return amounts_all
@classmethod
def withdraw_all(cls, bitcoinpayments, addresses_shares):
#if len(bitcoinpayments)!=len(addresses_shares):
# raise Exception("")
amounts_all=Payment.calculate_amounts(bitcoinpayments, addresses_shares)
for bp in bitcoinpayments:
am=bp.withdraw_amounts(addresses_shares)
bp.withdraw_addresses=",".join(addresses_shares.keys())
bp.withdraw_proportions=",".join(
[str(x) for x in addresses_shares.values()])
bp.withdraw_amounts=",".join(
[str(x) for x in am])
bp.withdrawn_at=datetime.datetime.now()
bp.withdrawn_total=sum(am)
bp.save()
for i, share in enumerate(addresses_shares.keys()):
bitcoind.send(share, amounts_all[i])
return True
def is_paid(self, minconf=1):
if self.paid_at:
return True
self.update_payment(minconf=minconf)
return self.amount_paid>=self.amount
def getbalance(self, minconf=1):
return bitcoind.total_received(self.address, minconf=minconf)
def update_payment(self, minconf=1):
new_amount=Decimal(bitcoin_getbalance(self.address, minconf=minconf))
print "blaa", new_amount, self.address
if new_amount>=self.amount:
self.amount_paid=new_amount
self.paid_at=datetime.datetime.now()
self.save()
#elif (datetime.datetime.now()-self.updated_at)>datetime.timedelta(hours=PAYMENT_VALID_HOURS):
# self.deactivate()
def deactivate(self):
return False
if self.amount_paid > Decimal("0"):
return False
self.active=False
self.description=""
self.save()
return True
def save(self, **kwargs):
self.updated_at = datetime.datetime.now()
return super(Payment, self).save(**kwargs)
def __unicode__(self):
return unicode(self.amount_paid)
@models.permalink
def get_absolute_url(self):
return ('view_or_url_name',)
class WalletTransaction(models.Model):
    """A ledger entry between wallets and/or external bitcoin addresses.

    Variants (see __unicode__): wallet->wallet transfer, wallet->address
    outgoing send, deposit (no from_wallet), or fee entry.
    """
    created_at = models.DateTimeField(default=datetime.datetime.now)
    from_wallet = models.ForeignKey(
        'Wallet',
        null=True,
        related_name="sent_transactions")
    to_wallet = models.ForeignKey(
        'Wallet',
        null=True,
        related_name="received_transactions")
    # Set for outgoing sends to an external address.
    to_bitcoinaddress = models.CharField(
        max_length=50,
        blank=True)
    outgoing_transaction = models.ForeignKey('OutgoingTransaction', null=True, default=None)
    amount = models.DecimalField(
        max_digits=16,
        decimal_places=8,
        default=Decimal("0.0"))
    description = models.CharField(max_length=100, blank=True)
    # Set for deposits credited from a receiving address.
    deposit_address = models.ForeignKey(BitcoinAddress, null=True)
    txid = models.CharField(max_length=100, blank=True, null=True)
    deposit_transaction = models.OneToOneField(DepositTransaction, null=True)

    def __unicode__(self):
        if self.from_wallet and self.to_wallet:
            return u"Wallet transaction "+unicode(self.amount)
        elif self.from_wallet and self.to_bitcoinaddress:
            return u"Outgoing bitcoin transaction "+unicode(self.amount)
        elif self.to_wallet and not self.from_wallet:
            return u"Deposit "+unicode(self.amount)
        return u"Fee "+unicode(self.amount)

    def clean(self):
        # Local import avoids pulling validation machinery at module load.
        from django.core.exceptions import ValidationError
        if not self.from_wallet and not self.to_wallet:
            raise ValidationError('Wallet transaction error - define a wallet.')

    def confirmation_status(self,
        minconf=settings.BITCOIN_MINIMUM_CONFIRMATIONS,
        transactions=None):
        """
        Returns the confirmed and unconfirmed parts of this transfer.
        Also accepts and returns a list of transactions that are being
        currently used.
        The sum of the two amounts is the total transaction amount.

        NOTE(review): Wallet.balance() as defined in this module returns a
        scalar, so the 3-way unpack below would fail -- confirm which
        balance() implementation is in effect.
        """
        if not transactions: transactions = {}
        # Outgoing sends and zero-confirmation queries count as fully confirmed.
        if minconf == 0 or self.to_bitcoinaddress:
            return (0, self.amount, transactions)
        _, confirmed, txs = self.from_wallet.balance(minconf=minconf,
            timeframe=self.created_at,
            transactions=transactions)
        transactions.update(txs)
        # This transfer can be confirmed at most up to its own amount.
        if confirmed > self.amount: confirmed = self.amount
        unconfirmed = self.amount - confirmed
        return (unconfirmed, confirmed, transactions)
from django.db.models import Q
class Wallet(models.Model):
    """A logical wallet: owns receiving addresses and a cached balance.

    last_balance is a cached value maintained optimistically (see
    update_last_balance / update_wallet_balance); transaction_counter is a
    version counter used for compare-and-set concurrency checks.
    """
    created_at = models.DateTimeField(default=datetime.datetime.now)
    # Maintained by save() below; there is no auto_now on the field.
    updated_at = models.DateTimeField()
    label = models.CharField(max_length=50, blank=True)
    # DEPRECATED: changed to foreign key
    # addresses = models.ManyToManyField(BitcoinAddress, through="WalletBitcoinAddress")
    transactions_with = models.ManyToManyField(
        'self',
        through=WalletTransaction,
        symmetrical=False)
    transaction_counter = models.IntegerField(default=1)
    last_balance = models.DecimalField(default=Decimal(0), max_digits=16, decimal_places=8)
    # track_transaction_value = models.BooleanField(default=False)
    # tries to update instantly, if not succesful updates using sql query (celery task)
    def update_last_balance(self, amount):
        """Add `amount` to the cached balance, or recompute asynchronously.

        Optimistic compare-and-set: the UPDATE only matches if last_balance
        is still what this instance saw; otherwise a celery task recomputes
        the balance from SQL shortly after.
        """
        if self.__class__.objects.filter(id=self.id, last_balance=self.last_balance
            ).update(last_balance=(self.last_balance + amount)) < 1:
            update_wallet_balance.apply_async((self.id,), countdown=1)
def __unicode__(self):
return u"%s: %s" % (self.label,
self.created_at.strftime('%Y-%m-%d %H:%M'))
    def save(self, *args, **kwargs):
        '''No need for labels.'''
        # Keep updated_at current manually (no auto_now on the field).
        self.updated_at = datetime.datetime.now()
        super(Wallet, self).save(*args, **kwargs)
        #super(Wallet, self).save(*args, **kwargs)
    def receiving_address(self, fresh_addr=True):
        """Return a receiving address string for this wallet.

        With fresh_addr=True only never-funded addresses qualify; otherwise
        any active attached address is reused. New addresses are claimed
        from the global pool with a compare-and-set update.
        """
        while True:
            usable_addresses = self.addresses.filter(active=True).order_by("id")
            if fresh_addr:
                usable_addresses = usable_addresses.filter(least_received=Decimal(0))
            if usable_addresses.count():
                return usable_addresses[0].address
            addr = new_bitcoin_address()
            # Attach the pool address to this wallet only if still unclaimed.
            updated = BitcoinAddress.objects.select_for_update().filter(Q(id=addr.id) & Q(active=True) & Q(least_received__lte=0) & Q(wallet__isnull=True))\
                .update(active=True, wallet=self)
            print "addr_id", addr.id, updated
            # db_transaction.commit()
            if updated:
                return addr.address
            else:
                raise Exception("Concurrency error!")
    def static_receiving_address(self):
        ''' Returns a static receiving address for this Wallet object.

        Simply reuses any active attached address instead of demanding a
        never-funded one (fresh_addr=False).
        '''
        return self.receiving_address(fresh_addr=False)
    def send_to_wallet(self, otherWallet, amount, description=''):
        """Transfer `amount` BTC internally from this wallet to otherWallet.

        Validates the transfer, then performs an optimistic concurrency
        check keyed on (transaction_counter, last_balance) before creating
        the WalletTransaction. Raises on self-sends, unsaved wallets,
        non-positive amounts, insufficient funds, or a lost CAS race.
        Returns the created WalletTransaction.
        """
        if type(amount) != Decimal:
            amount = Decimal(amount)
        # Normalize to satoshi precision.
        amount = amount.quantize(Decimal('0.00000001'))
        with db_transaction.autocommit():
            db_transaction.enter_transaction_management()
            db_transaction.commit()
            if settings.BITCOIN_UNCONFIRMED_TRANSFERS:
                avail = self.total_balance_unconfirmed()
            else:
                avail = self.total_balance()
            # Refresh the cached balance before the checks below.
            updated = Wallet.objects.filter(Q(id=self.id)).update(last_balance=avail)
            if self == otherWallet:
                raise Exception(_("Can't send to self-wallet"))
            if not otherWallet.id or not self.id:
                raise Exception(_("Some of the wallets not saved"))
            if amount <= 0:
                raise Exception(_("Can't send zero or negative amounts"))
            if amount > avail:
                raise Exception(_("Trying to send too much"))
            # concurrency check
            new_balance = avail - amount
            # CAS: succeeds only if counter and balance are unchanged.
            updated = Wallet.objects.filter(Q(id=self.id) & Q(transaction_counter=self.transaction_counter) &
                Q(last_balance=avail))\
                .update(last_balance=new_balance, transaction_counter=self.transaction_counter+1)
            if not updated:
                print "wallet transaction concurrency:", new_balance, avail, self.transaction_counter, self.last_balance, self.total_balance()
                raise Exception(_("Concurrency error with transactions. Please try again."))
            # db_transaction.commit()
            # concurrency check end
            transaction = WalletTransaction.objects.create(
                amount=amount,
                from_wallet=self,
                to_wallet=otherWallet,
                description=description)
            # db_transaction.commit()
            # Mirror the DB changes onto this in-memory instance.
            self.transaction_counter = self.transaction_counter+1
            self.last_balance = new_balance
            # updated = Wallet.objects.filter(Q(id=otherWallet.id))\
            #     .update(last_balance=otherWallet.total_balance_sql())
            otherWallet.update_last_balance(amount)
            if settings.BITCOIN_TRANSACTION_SIGNALING:
                # Internal transfers are final, so fire both the unconfirmed
                # and confirmed signals for each side.
                balance_changed.send(sender=self,
                    changed=(Decimal(-1) * amount), transaction=transaction)
                balance_changed.send(sender=otherWallet,
                    changed=(amount), transaction=transaction)
                balance_changed_confirmed.send(sender=self,
                    changed=(Decimal(-1) * amount), transaction=transaction)
                balance_changed_confirmed.send(sender=otherWallet,
                    changed=(amount), transaction=transaction)
            return transaction
    def send_to_address(self, address, amount, description='', expires_seconds=settings.BITCOIN_OUTGOING_DEFAULT_DELAY_SECONDS):
        """Queue an outgoing send of `amount` BTC to an external address.

        The coins are not broadcast here: an OutgoingTransaction row is
        queued and process_outgoing_transactions is scheduled to flush it
        after expires_seconds. Uses the same optimistic concurrency check
        as send_to_wallet. Returns (WalletTransaction, None).
        """
        if settings.BITCOIN_DISABLE_OUTGOING:
            raise Exception("Outgoing transactions disabled! contact support.")
        address = address.strip()
        if type(amount) != Decimal:
            amount = Decimal(amount)
        # Normalize to satoshi precision.
        amount = amount.quantize(Decimal('0.00000001'))
        if not is_valid_btc_address(str(address)):
            raise Exception(_("Not a valid bitcoin address") + ":" + address)
        if amount <= 0:
            raise Exception(_("Can't send zero or negative amounts"))
        # concurrency check
        with db_transaction.autocommit():
            db_transaction.enter_transaction_management()
            db_transaction.commit()
            avail = self.total_balance()
            updated = Wallet.objects.filter(Q(id=self.id)).update(last_balance=avail)
            if amount > avail:
                raise Exception(_("Trying to send too much"))
            new_balance = avail - amount
            # CAS: succeeds only if counter and balance are unchanged.
            updated = Wallet.objects.filter(Q(id=self.id) & Q(transaction_counter=self.transaction_counter) &
                Q(last_balance=avail) )\
                .update(last_balance=new_balance, transaction_counter=self.transaction_counter+1)
            if not updated:
                print "address transaction concurrency:", new_balance, avail, self.transaction_counter, self.last_balance, self.total_balance()
                raise Exception(_("Concurrency error with transactions. Please try again."))
            # concurrency check end
            outgoing_transaction = OutgoingTransaction.objects.create(amount=amount, to_bitcoinaddress=address,
                expires_at=datetime.datetime.now()+datetime.timedelta(seconds=expires_seconds))
            bwt = WalletTransaction.objects.create(
                amount=amount,
                from_wallet=self,
                to_bitcoinaddress=address,
                outgoing_transaction=outgoing_transaction,
                description=description)
            # Schedule the batcher to run just after this row expires.
            process_outgoing_transactions.apply_async((), countdown=(expires_seconds+1))
            # try:
            #     result = bitcoind.send(address, amount)
            # except jsonrpc.JSONRPCException:
            #     bwt.delete()
            #     updated2 = Wallet.objects.filter(Q(id=self.id) & Q(last_balance=new_balance)).update(last_balance=avail)
            #     raise
            # Mirror the DB changes onto this in-memory instance.
            self.transaction_counter = self.transaction_counter+1
            self.last_balance = new_balance
            # check if a transaction fee exists, and deduct it from the wallet
            # TODO: because fee can't be known beforehand, can result in negative wallet balance.
            # currently isn't much of a issue, but might be in the future, depending of the application
            # transaction = bitcoind.gettransaction(result)
            # fee_transaction = None
            # total_amount = amount
            # if Decimal(transaction['fee']) < Decimal(0):
            #     fee_transaction = WalletTransaction.objects.create(
            #         amount=Decimal(transaction['fee']) * Decimal(-1),
            #         from_wallet=self)
            #     total_amount += fee_transaction.amount
            #     updated = Wallet.objects.filter(Q(id=self.id))\
            #         .update(last_balance=new_balance-fee_transaction.amount)
            if settings.BITCOIN_TRANSACTION_SIGNALING:
                balance_changed.send(sender=self,
                    changed=(Decimal(-1) * amount), transaction=bwt)
                balance_changed_confirmed.send(sender=self,
                    changed=(Decimal(-1) * amount), transaction=bwt)
            return (bwt, None)
    def update_transaction_cache(self,
        mincf=settings.BITCOIN_MINIMUM_CONFIRMATIONS):
        """
        Finds the timestamp of the oldest transaction which wasn't yet
        confirmed. If none, returns the current timestamp.

        Results are cached for 15 minutes per confirmation level.
        """
        if mincf == 0: return datetime.datetime.now()
        # Cache keys are parameterized by the confirmation level.
        transactions_checked = "bitcoin_transactions_checked_%d" % mincf
        oldest_unconfirmed = "bitcoin_oldest_unconfirmed_%d" % mincf
        if cache.get(transactions_checked):
            return cache.get(oldest_unconfirmed)
        else:
            cache.set(transactions_checked, True, 60*15)
            current_timestamp = datetime.datetime.now()
            transactions = WalletTransaction.objects.all()
            oldest = cache.get(oldest_unconfirmed)
            if oldest:
                # Anything older than the previous answer is already confirmed.
                transactions = transactions.filter(created_at__gte=oldest)
            transactions_cache = {}
            for t in transactions.order_by('created_at'):
                # NOTE: the throwaway `_` here locally shadows the gettext
                # alias imported at module level (harmless in this scope).
                unc, _, txs = t.confirmation_status(minconf=mincf, transactions=transactions_cache)
                transactions_cache.update(txs)
                if unc:
                    # First (oldest) transaction with an unconfirmed part.
                    cache.set(oldest_unconfirmed, t.created_at)
                    return t.created_at
            cache.set(oldest_unconfirmed, current_timestamp)
            return current_timestamp
    def balance(self, minconf=settings.BITCOIN_MINIMUM_CONFIRMATIONS,
        timeframe=None, transactions=None):
        """
        Returns a "greater or equal than minimum" total ammount received at
        this wallet with the given confirmations at the given timeframe.

        NOTE(review): timeframe and transactions are accepted but ignored,
        and a scalar is returned -- while WalletTransaction.confirmation_status
        and total_balance_unconfirmed() unpack a tuple from this method.
        Confirm which contract is current.
        """
        if minconf == settings.BITCOIN_MINIMUM_CONFIRMATIONS:
            return self.total_balance_sql(True)
        elif minconf == 0:
            return self.total_balance_sql(False)
        # Only the two configured confirmation levels are supported.
        raise Exception("Incorrect minconf parameter")
    def total_balance_sql(self, confirmed=True):
        """Compute the wallet balance with one raw SQL query.

        balance = address deposits + incoming transfers - outgoing transfers.
        The confirmed variant also updates self.last_balance in memory.
        NOTE: IFNULL is MySQL-specific; the %-interpolated value is the
        integer primary key, so no external input reaches the SQL string.
        """
        from django.db import connection
        cursor = connection.cursor()
        if confirmed == False:
            # Unconfirmed: raw least_received per address, and only transfers
            # from real wallets (from_wallet_id>0) count as incoming.
            sql="""
            SELECT IFNULL((SELECT SUM(least_received) FROM django_bitcoin_bitcoinaddress ba WHERE ba.wallet_id=%(id)s), 0)
            + IFNULL((SELECT SUM(amount) FROM django_bitcoin_wallettransaction wt WHERE wt.to_wallet_id=%(id)s AND wt.from_wallet_id>0), 0)
            - IFNULL((SELECT SUM(amount) FROM django_bitcoin_wallettransaction wt WHERE wt.from_wallet_id=%(id)s), 0) as total_balance;
            """ % {'id': self.id}
            cursor.execute(sql)
            return cursor.fetchone()[0]
        else:
            # Confirmed: only addresses not yet migrated to WalletTransaction
            # rows contribute directly (others are counted via transfers).
            sql="""
            SELECT IFNULL((SELECT SUM(least_received_confirmed) FROM django_bitcoin_bitcoinaddress ba WHERE ba.wallet_id=%(id)s AND ba.migrated_to_transactions=0), 0)
            + IFNULL((SELECT SUM(amount) FROM django_bitcoin_wallettransaction wt WHERE wt.to_wallet_id=%(id)s), 0)
            - IFNULL((SELECT SUM(amount) FROM django_bitcoin_wallettransaction wt WHERE wt.from_wallet_id=%(id)s), 0) as total_balance;
            """ % {'id': self.id}
            cursor.execute(sql)
            self.last_balance = cursor.fetchone()[0]
            return self.last_balance
def total_balance(self, minconf=settings.BITCOIN_MINIMUM_CONFIRMATIONS):
"""
Returns the total confirmed balance from the Wallet.
"""
if not settings.BITCOIN_UNCONFIRMED_TRANSFERS:
# if settings.BITCOIN_TRANSACTION_SIGNALING:
# if minconf == settings.BITCOIN_MINIMUM_CONFIRMATIONS:
# return self.total_balance_sql()
# elif mincof == 0:
# self.total_balance_sql(False)
if minconf >= settings.BITCOIN_MINIMUM_CONFIRMATIONS:
self.last_balance = self.total_received(minconf) - self.total_sent()
return self.last_balance
else:
return self.total_received(minconf) - self.total_sent()
else:
return self.balance(minconf)[1]
    def total_balance_historical(self, balance_date, minconf=settings.BITCOIN_MINIMUM_CONFIRMATIONS):
        """Wallet balance as it stood at *balance_date*.

        Sums address receives and incoming wallet transactions created on
        or before balance_date, minus outgoing transactions up to that
        date. Address receives are read from cached aggregate columns when
        minconf matches the configured minimum (or 0), otherwise computed
        per-address via received().
        """
        if settings.BITCOIN_TRANSACTION_SIGNALING:
            if minconf == settings.BITCOIN_MINIMUM_CONFIRMATIONS:
                # Cached confirmed totals on addresses not yet migrated to transactions.
                s = self.addresses.filter(created_at__lte=balance_date, migrated_to_transactions=False).aggregate(models.Sum("least_received_confirmed"))['least_received_confirmed__sum'] or Decimal(0)
            elif minconf == 0:
                s = self.addresses.filter(created_at__lte=balance_date, migrated_to_transactions=False).aggregate(models.Sum("least_received"))['least_received__sum'] or Decimal(0)
            else:
                # Arbitrary confirmation count: fall back to per-address computation.
                s = sum([a.received(minconf=minconf) for a in self.addresses.filter(created_at__lte=balance_date, migrated_to_transactions=False)])
        else:
            s = sum([a.received(minconf=minconf) for a in self.addresses.filter(created_at__lte=balance_date)])
        rt = self.received_transactions.filter(created_at__lte=balance_date).aggregate(models.Sum("amount"))['amount__sum'] or Decimal(0)
        received = (s + rt)
        sent = self.sent_transactions.filter(created_at__lte=balance_date).aggregate(models.Sum("amount"))['amount__sum'] or Decimal(0)
        return received - sent
def total_balance_unconfirmed(self):
if not settings.BITCOIN_UNCONFIRMED_TRANSFERS:
return self.total_received(0) - self.total_sent()
else:
x = self.balance()
return x[0] + x[1]
def unconfirmed_balance(self):
if not settings.BITCOIN_UNCONFIRMED_TRANSFERS:
return self.total_received(0) - self.total_sent()
else:
return self.balance()[0]
    def total_received(self, minconf=settings.BITCOIN_MINIMUM_CONFIRMATIONS):
        """Returns the raw amount ever received by this wallet.

        Received = coins received at the wallet's addresses (counted at
        *minconf* confirmations) + incoming WalletTransactions.
        """
        if settings.BITCOIN_TRANSACTION_SIGNALING:
            if minconf == settings.BITCOIN_MINIMUM_CONFIRMATIONS:
                # Fast path: cached confirmed totals on non-migrated addresses.
                s = self.addresses.filter(migrated_to_transactions=False).aggregate(models.Sum("least_received_confirmed"))['least_received_confirmed__sum'] or Decimal(0)
            elif minconf == 0:
                # NOTE(review): this branch sums over all() addresses while the
                # sibling branches exclude migrated ones — confirm whether the
                # asymmetry is intentional.
                s = self.addresses.all().aggregate(models.Sum("least_received"))['least_received__sum'] or Decimal(0)
            else:
                s = sum([a.received(minconf=minconf) for a in self.addresses.filter(migrated_to_transactions=False)])
        else:
            s = sum([a.received(minconf=minconf) for a in self.addresses.filter(migrated_to_transactions=False)])
        if minconf == 0:
            # At zero confirmations only count transactions with from_wallet >= 1.
            rt = self.received_transactions.filter(from_wallet__gte=1).aggregate(models.Sum("amount"))['amount__sum'] or Decimal(0)
        else:
            rt = self.received_transactions.aggregate(models.Sum("amount"))['amount__sum'] or Decimal(0)
        return (s + rt)
def total_sent(self):
"""Returns the raw ammount ever sent by this wallet."""
return self.sent_transactions.aggregate(models.Sum("amount"))['amount__sum'] or Decimal(0)
def has_history(self):
"""Returns True if this wallet was any transacion history."""
if self.received_transactions.all().count():
return True
if self.sent_transactions.all().count():
return True
if filter(lambda x: x.received(), self.addresses.all()):
return True
return False
def merge_wallet(self, other_wallet):
if self.id>0 and other_wallet.id>0:
from django.db import connection, transaction
cursor = connection.cursor()
cursor.execute("UPDATE django_bitcoin_bitcoinaddress SET wallet_id="+str(other_wallet.id)+\
" WHERE wallet_id="+str(self.id))
cursor.execute("UPDATE django_bitcoin_wallettransaction SET from_wallet_id="+str(other_wallet.id)+\
" WHERE from_wallet_id="+str(self.id))
cursor.execute("UPDATE django_bitcoin_wallettransaction SET to_wallet_id="+str(other_wallet.id)+\
" WHERE to_wallet_id="+str(self.id))
cursor.execute("DELETE FROM django_bitcoin_wallettransaction WHERE to_wallet_id=from_wallet_id")
transaction.commit_unless_managed()
# def save(self, **kwargs):
# self.updated_at = datetime.datetime.now()
# super(Wallet, self).save(**kwargs)
### Maybe in the future
# class FiatWalletTransaction(models.Model):
# """Transaction for storing fiat currencies"""
# pass
# class FiatWallet(models.Model):
# """Wallet for storing fiat currencies"""
# pass
# class BitcoinEscrow(models.Model):
# """Bitcoin escrow payment"""
# created_at = models.DateTimeField(auto_now_add=True)
# updated_at = models.DateTimeField(auto_now=True)
# seller = models.ForeignKey(User)
# bitcoin_payment = models.ForeignKey(Payment)
# confirm_hash = models.CharField(max_length=50, blank=True)
# buyer_address = models.TextField()
# buyer_phone = models.CharField(max_length=20, blank=True)
# buyer_email = models.EmailField(max_length=75)
# def save(self, **kwargs):
# super(BitcoinEscrow, self).save(**kwargs)
# if not self.confirm_hash:
# self.confirm_hash=generateuniquehash(
# length=32,
# extradata=str(self.id))
# super(BitcoinEscrow, self).save(**kwargs)
# @models.permalink
# def get_absolute_url(self):
# return ('view_or_url_name',)
def refill_payment_queue():
    """Top up the pool of unused addresses to BITCOIN_ADDRESS_BUFFER_SIZE.

    Creates new inactive, wallet-less BitcoinAddress rows via bitcoind
    until the buffer size is reached; does nothing when the pool is full.
    """
    available = BitcoinAddress.objects.filter(active=False, wallet=None).count()
    deficit = settings.BITCOIN_ADDRESS_BUFFER_SIZE - available
    for _ in range(deficit):
        BitcoinAddress.objects.create(address=bitcoind.create_address(), active=False)
def update_payments():
    """Refresh amount_paid of every active BitcoinPayment from bitcoind.

    NOTE(review): the cache branch copies 'bitcoinprice_old' over
    'bitcoinprice' when no full check has been recorded — presumably a
    stale-price fallback; confirm against the code that writes these keys.
    """
    if not cache.get('last_full_check'):
        cache.set('bitcoinprice', cache.get('bitcoinprice_old'))
    bps=BitcoinPayment.objects.filter(active=True)
    for bp in bps:
        bp.amount_paid=Decimal(bitcoin_getbalance(bp.address))
        bp.save()
        print bp.amount
        print bp.amount_paid
@transaction.commit_on_success
def new_bitcoin_payment(amount):
    """Reserve an inactive BitcoinPayment, activate it and set its amount.

    Refills the payment queue first when no inactive payment is available.
    """
    candidates = BitcoinPayment.objects.filter(active=False)
    if len(candidates) < 1:
        refill_payment_queue()
        candidates = BitcoinPayment.objects.filter(active=False)
    payment = candidates[0]
    payment.active = True
    payment.amount = amount
    payment.save()
    return payment
def getNewBitcoinPayment(amount):
    """Deprecated camelCase alias for new_bitcoin_payment()."""
    warnings.warn("Use new_bitcoin_payment(amount) instead",
                  DeprecationWarning)
    return new_bitcoin_payment(amount)
@transaction.commit_on_success
def new_bitcoin_payment_eur(amount):
    """Reserve a payment whose BTC amount equals *amount* EUR at the 24h rate."""
    print bitcoinprice_eur()
    return new_bitcoin_payment(Decimal(amount)/Decimal(bitcoinprice_eur()['24h']))
def getNewBitcoinPayment_eur(amount):
    """Deprecated camelCase alias for new_bitcoin_payment_eur().

    Now emits a DeprecationWarning, consistent with getNewBitcoinPayment().
    """
    warnings.warn("Use new_bitcoin_payment_eur(amount) instead",
                  DeprecationWarning)
    return new_bitcoin_payment_eur(amount)
# initialize the conversion module
from django_bitcoin import currency
from django.core import urlresolvers
from django.utils import importlib
# Register every currency converter listed in settings.BITCOIN_CURRENCIES.
# Each entry is a dotted path: get_mod_func splits it into module and class
# name, the class is imported and an instance registered with the exchange.
for dottedpath in settings.BITCOIN_CURRENCIES:
    mod, func = urlresolvers.get_mod_func(dottedpath)
    klass = getattr(importlib.import_module(mod), func)
    currency.exchange.register_currency(klass())
# Historical price storage
class HistoricalPrice(models.Model):
    """A stored snapshot of the bitcoin exchange rate for one fiat currency."""
    created_at = models.DateTimeField(auto_now_add=True)
    price = models.DecimalField(max_digits=16, decimal_places=2)
    # Comma-separated "<symbol>_avg" entries naming the markets the price
    # was averaged from (written by set_historical_price()).
    params = models.CharField(max_length=50)
    currency = models.CharField(max_length=10)
    class Meta:
        verbose_name = _('HistoricalPrice')
        verbose_name_plural = _('HistoricalPrices')
    def __unicode__(self):
        return str(self.created_at) + " - " + str(self.price) + " - " + str(self.params)
def set_historical_price(curr="EUR"):
    """Compute and store a new HistoricalPrice for *curr*.

    Averages the 'avg' quote of the three highest-volume markets trading
    *curr* (volume > 1, mtgox symbols excluded) and records which markets
    contributed in the params field.
    """
    markets = currency.markets_chart()
    # print markets
    markets_currency = sorted(filter(lambda m: m['currency']==curr and m['volume']>1 and not m['symbol'].startswith("mtgox"),
        markets.values()), key=lambda m: -m['volume'])[:3]
    # print markets_currency
    price = sum([m['avg'] for m in markets_currency]) / len(markets_currency)
    hp = HistoricalPrice.objects.create(price=Decimal(str(price)), params=",".join([m['symbol']+"_avg" for m in markets_currency]), currency=curr,
        created_at=datetime.datetime.now())
    print "Created new",hp
    return hp
def get_historical_price_object(dt=None, curr="EUR"):
    """Return the HistoricalPrice in effect at *dt* (or now).

    With dt: the latest price recorded at or before dt, or None when there
    is none. Without dt: the most recent price no older than
    HISTORICALPRICES_FETCH_TIMESPAN_MINUTES; when the cache is stale a
    fresh price is fetched and stored via set_historical_price().
    """
    if dt:
        try:
            return HistoricalPrice.objects.filter(
                currency=curr, created_at__lte=dt).order_by("-created_at")[0]
        except IndexError:
            return None
    cutoff = datetime.datetime.now() - datetime.timedelta(
        minutes=settings.HISTORICALPRICES_FETCH_TIMESPAN_MINUTES)
    try:
        return HistoricalPrice.objects.filter(
            currency=curr, created_at__gte=cutoff).order_by("-created_at")[0]
    except IndexError:
        # Bug fix: the fallback previously ignored *curr* and always
        # recorded an EUR price.
        return set_historical_price(curr)
def get_historical_price(dt=None, curr="EUR"):
    """Return the stored price for *curr* at *dt* (latest when dt is None).

    Bug fix: dt and curr were previously accepted but ignored. Returns
    None when a historical dt has no recorded price.
    """
    price_obj = get_historical_price_object(dt, curr)
    return price_obj.price if price_obj else None
# EOF
| |
# -*- coding: utf-8 -*-
"""English US keymap."""
# Maps normalized key identifiers to their US-English display labels.
keymap = {
    # Digits
    "0": "0",
    "1": "1",
    "2": "2",
    "3": "3",
    "4": "4",
    "5": "5",
    "6": "6",
    "7": "7",
    "8": "8",
    "9": "9",
    # Letters
    "a": "A",
    "b": "B",
    "c": "C",
    "d": "D",
    "e": "E",
    "f": "F",
    "g": "G",
    "h": "H",
    "i": "I",
    "j": "J",
    "k": "K",
    "l": "L",
    "m": "M",
    "n": "N",
    "o": "O",
    "p": "P",
    "q": "Q",
    "r": "R",
    "s": "S",
    "t": "T",
    "u": "U",
    "v": "V",
    "w": "W",
    "x": "X",
    "y": "Y",
    "z": "Z",
    # Space
    "space": "Space",
    # Punctuation
    "backslash": "\\",
    "bar": "|",
    "brace-left": "{",
    "brace-right": "}",
    "bracket-left": "[",
    "bracket-right": "]",
    "colon": ":",
    "comma": ",",
    "double-quote": "\"",
    "equal": "=",
    "exclam": "!",
    "grave": "`",
    "greater": ">",
    "less": "<",
    "minus": "-",
    "period": ".",
    "plus": "+",
    "question": "?",
    "semicolon": ";",
    "single-quote": "'",
    "slash": "/",
    "tilde": "~",
    "underscore": "_",
    # Navigation keys
    "arrow-up": "Up",
    "arrow-down": "Down",
    "arrow-left": "Left",
    "arrow-right": "Right",
    "page-up": "Page Up",
    "page-down": "Page Down",
    "home": "Home",
    "end": "End",
    # Edit keys
    "backspace": "Backspace",
    "delete": "Del",
    "insert": "Ins",
    "tab": "Tab",
    # Action keys
    "break": "Break",
    "caps-lock": "Caps Lock",
    "clear": "Clear",
    "eject": "Eject",
    "enter": "Enter",
    "escape": "Esc",
    "help": "Help",
    "print-screen": "Print Screen",
    "scroll-lock": "Scroll Lock",
    # Numeric keypad
    "num0": "Num 0",
    "num1": "Num 1",
    "num2": "Num 2",
    "num3": "Num 3",
    "num4": "Num 4",
    "num5": "Num 5",
    "num6": "Num 6",
    "num7": "Num 7",
    "num8": "Num 8",
    "num9": "Num 9",
    "num-asterisk": "Num *",
    "num-clear": "Num Clear",
    "num-delete": "Num Del",
    "num-equal": "Num =",
    "num-lock": "Num Lock",
    "num-minus": "Num -",
    "num-plus": "Num +",
    "num-separator": "Num .",
    "num-slash": "Num /",
    "num-enter": "Num Enter",
    # Modifier keys
    "alt": "Alt",
    "alt-graph": "AltGr",
    "command": "Cmd",
    "control": "Ctrl",
    "function": "Fn",
    "left-alt": "Left Alt",
    "left-command": "Left Command",
    "left-control": "Left Ctrl",
    "left-meta": "Left Meta",
    "left-option": "Left Option",
    "left-shift": "Left Shift",
    "left-super": "Left Super",
    "left-windows": "Left Win",
    "meta": "Meta",
    "option": "Option",
    "right-alt": "Right Alt",
    "right-command": "Right Command",
    "right-control": "Right Ctrl",
    "right-meta": "Right Meta",
    "right-option": "Right Option",
    "right-shift": "Right Shift",
    "right-super": "Right Super",
    "right-windows": "Right Win",
    "shift": "Shift",
    "super": "Super",
    "windows": "Win",
    # Function keys
    "f1": "F1",
    "f2": "F2",
    "f3": "F3",
    "f4": "F4",
    "f5": "F5",
    "f6": "F6",
    "f7": "F7",
    "f8": "F8",
    "f9": "F9",
    "f10": "F10",
    "f11": "F11",
    "f12": "F12",
    "f13": "F13",
    "f14": "F14",
    "f15": "F15",
    "f16": "F16",
    "f17": "F17",
    "f18": "F18",
    "f19": "F19",
    "f20": "F20",
    "f21": "F21",
    "f22": "F22",
    "f23": "F23",
    "f24": "F24",
    # Extra keys
    "backtab": "Back Tab",
    "browser-back": "Browser Back",
    "browser-favorites": "Browser Favorites",
    "browser-forward": "Browser Forward",
    "browser-home": "Browser Home",
    "browser-refresh": "Browser Refresh",
    "browser-search": "Browser Search",
    "browser-stop": "Browser Stop",
    "context-menu": "Menu",
    "copy": "Copy",
    "mail": "Mail",
    "media": "Media",
    "media-next-track": "Next Track",
    "media-pause": "Pause",
    "media-play": "Play",
    "media-play-pause": "Play/Pause",
    "media-prev-track": "Previous Track",
    "media-stop": "Stop",
    "print": "Print",
    "reset": "Reset",
    "select": "Select",
    "sleep": "Sleep",
    "volume-down": "Volume Down",
    "volume-mute": "Mute",
    "volume-up": "Volume Up",
    "zoom": "Zoom",
    # Mouse
    "left-button": "Left Button",
    "middle-button": "Middle Button",
    "right-button": "Right Button",
    "x-button1": "X Button 1",
    "x-button2": "X Button 2"
}
# Maps alternative spellings/abbreviations to the canonical identifiers
# used as keys in keymap.
# NOTE(review): "page-up" and "right-meta" map to themselves here — harmless
# if aliases are resolved before the keymap lookup, but confirm whether the
# self-referential entries are intentional.
aliases = {
    "add": "num-plus",
    "altgr": "alt-graph",
    "apps": "context-menu",
    "back": "backspace",
    "bksp": "backspace",
    "bktab": "backtab",
    "cancel": "break",
    "capital": "caps-lock",
    "close-brace": "brace-right",
    "close-bracket": "bracket-right",
    "clr": "clear",
    "cmd": "command",
    "cplk": "caps-lock",
    "ctrl": "control",
    "dblquote": "double-quote",
    "decimal": "num-separator",
    "del": "delete",
    "divide": "num-slash",
    "down": "arrow-down",
    "esc": "escape",
    "return": "enter",
    "exclamation": "exclam",
    "favorites": "browser-favorites",
    "fn": "function",
    "forward": "browser-forward",
    "grave-accent": "grave",
    "greater-than": "greater",
    "gt": "greater",
    "hyphen": "minus",
    "ins": "insert",
    "lalt": "left-alt",
    "launch-mail": "mail",
    "launch-media": "media",
    "lbutton": "left-button",
    "lcmd": "left-command",
    "lcommand": "left-command",
    "lcontrol": "left-control",
    "lctrl": "left-control",
    "left": "arrow-left",
    "left-cmd": "left-command",
    "left-ctrl": "left-control",
    "lopt": "left-option",
    "loption": "left-option",
    "left-opt": "left-option",
    "left-win": "left-windows",
    "less-than": "less",
    "lmeta": "left-meta",
    "lshift": "left-shift",
    "lsuper": "left-super",
    "lt": "less",
    "lwin": "left-windows",
    "lwindows": "left-windows",
    "mbutton": "middle-button",
    "menu": "context-menu",
    "multiply": "num-asterisk",
    "mute": "volume-mute",
    "next": "page-down",
    "next-track": "media-next-track",
    "num-del": "num-delete",
    "numlk": "num-lock",
    "open-brace": "brace-left",
    "open-bracket": "bracket-left",
    "opt": "option",
    "page-dn": "page-down",
    "page-up": "page-up",
    "pause": "media-pause",
    "pg-dn": "page-down",
    "pg-up": "page-up",
    "pipe": "bar",
    "play": "media-play",
    "play-pause": "media-play-pause",
    "prev-track": "media-prev-track",
    "prior": "page-up",
    "prtsc": "print-screen",
    "question-mark": "question",
    "ralt": "right-alt",
    "rbutton": "right-button",
    "rcontrol": "right-control",
    "rcmd": "right-command",
    "rcommand": "right-command",
    "rctrl": "right-control",
    "refresh": "browser-refresh",
    "right": "arrow-right",
    "right-cmd": "right-command",
    "right-ctrl": "right-control",
    "right-meta": "right-meta",
    "right-opt": "right-option",
    "right-win": "right-windows",
    "rmeta": "right-meta",
    "ropt": "right-option",
    "roption": "right-option",
    "rshift": "right-shift",
    "rsuper": "right-super",
    "rwin": "right-windows",
    "rwindows": "right-windows",
    "scroll": "scroll-lock",
    "search": "browser-search",
    "separator": "num-separator",
    "spc": "space",
    "stop": "media-stop",
    "subtract": "num-minus",
    "tabulator": "tab",
    "up": "arrow-up",
    "vol-down": "volume-down",
    "vol-mute": "volume-mute",
    "vol-up": "volume-up",
    "win": "windows",
    "xbutton1": "x-button1",
    "xbutton2": "x-button2"
}
| |
import csv
import datetime
import json
import re
import uuid
from operator import itemgetter
from typing import (
Callable,
Dict,
MutableSequence,
NamedTuple,
Optional,
Sequence,
Tuple,
Union,
cast,
)
import tabulate
from looker_sdk import error
from looker_sdk.rtl import api_settings, auth_session, requests_transport, serialize
from looker_sdk.sdk import methods, models
from henry.modules import exceptions
from .. import __version__ as pkg
# Row-oriented result set: each row maps a column name to a scalar value.
TResult = MutableSequence[Dict[str, Union[str, int, bool]]]
class Fetcher:
    """Shared helper for henry commands.

    Wraps an authenticated Looker SDK client and provides the query,
    filter, sort, save and output utilities the commands build on.
    """

    def __init__(self, options: "Input"):
        # Timeframe is expressed as a Looker relative-date filter string.
        self.timeframe = f"{options.timeframe} days" if options.timeframe else "90 days"
        self.min_queries = options.min_queries or 0
        self.limit = options.limit[0] if options.limit else None
        self.sortkey = options.sortkey
        cmd = options.command
        sub_cmd = options.subcommand or None
        self.cmd = f"{cmd}_{sub_cmd}" if sub_cmd else cmd
        self.save = options.save
        self.quiet = options.quiet
        self.sdk = self.configure_sdk(
            options.config_file, options.section, options.timeout
        )
        self._verify_api_credentials()

    def configure_sdk(
        self, config_file: str, section: str, timeout: Optional[int],
    ) -> methods.LookerSDK:
        """Instantiates and returns a LookerSDK object and overrides default timeout if
        specified by user.
        """
        settings = api_settings.ApiSettings.configure(config_file, section)
        # Tag every request so henry usage is identifiable in Looker logs.
        user_agent_tag = f"Henry v{pkg.__version__}: cmd={self.cmd}, sid={uuid.uuid1()}"
        settings.headers = {
            "Content-Type": "application/json",
            "User-Agent": user_agent_tag,
        }
        if timeout:
            settings.timeout = timeout
        settings.api_version = "3.1"
        transport = requests_transport.RequestsTransport.configure(settings)
        return methods.LookerSDK(
            auth_session.AuthSession(settings, transport, serialize.deserialize),
            serialize.deserialize,
            serialize.serialize,
            transport,
        )

    def _verify_api_credentials(self):
        """Fail fast when the configured API credentials cannot authenticate."""
        try:
            self.sdk.me()
        except error.SDKError:
            # Typo fix in message; bare `raise` preserves the traceback.
            print("Error retrieving self using API. Please check your credentials.")
            raise

    def get_projects(
        self, project_id: Optional[str] = None
    ) -> Sequence[models.Project]:
        """Returns a list of projects."""
        try:
            if project_id:
                projects = [self.sdk.project(project_id)]
            else:
                projects = self.sdk.all_projects()
        except error.SDKError:
            raise exceptions.NotFoundError("An error occurred while getting projects.")
        return projects

    def get_models(
        self, *, project: Optional[str] = None, model: Optional[str] = None
    ) -> Sequence[models.LookmlModel]:
        """Returns a list of lookml models."""
        ret: Sequence[models.LookmlModel]
        if project:
            # Validates the project exists (raises NotFoundError otherwise).
            self.get_projects(project)
        try:
            if model:
                ml = [self.sdk.lookml_model(model)]
            else:
                ml = self.sdk.all_lookml_models()
        except error.SDKError:
            raise exceptions.NotFoundError("An error occurred while getting models.")
        else:
            if project:
                # .lower() is used so behavior is consistent with /project endpoint
                ml = list(
                    filter(lambda m: m.project_name.lower() == project.lower(), ml,)  # type: ignore # noqa: B950
                )
            ml = list(filter(lambda m: cast(bool, m.has_content), ml))
        return ml

    def get_used_models(self) -> Dict[str, int]:
        """Returns a dictionary with model names as keys and query count as values."""
        resp = self.sdk.run_inline_query(
            "json",
            models.WriteQuery(
                model="i__looker",
                view="history",
                # Fix: previously a single comma-joined string; the fields
                # parameter is a list with one field per element (as in the
                # other queries in this class).
                fields=["history.query_run_count", "query.model"],
                filters={
                    "history.created_date": self.timeframe,
                    "query.model": "-system^_^_activity, -i^_^_looker",
                    "history.query_run_count": ">0",
                    "user.dev_mode": "No",
                },
                limit="5000",
            ),
        )
        _results: MutableSequence[Dict[str, int]] = json.loads(resp)
        results = {
            str(row["query.model"]): int(row["history.query_run_count"])
            for row in _results
        }
        return results

    def get_explores(
        self, *, model: Optional[str] = None, explore: Optional[str] = None
    ) -> Sequence[models.LookmlModelExplore]:
        """Returns a list of explores."""
        # Fix: initialized up front — passing `explore` without `model`
        # previously left this unbound and raised UnboundLocalError.
        explores: MutableSequence[models.LookmlModelExplore] = []
        try:
            if model and explore:
                explores = [self.sdk.lookml_model_explore(model, explore)]
            elif not explore:
                all_models = self.get_models(model=model)
                for m in all_models:
                    assert isinstance(m.name, str)
                    assert isinstance(m.explores, list)
                    explores.extend(
                        [
                            self.sdk.lookml_model_explore(m.name, cast(str, e.name))
                            for e in m.explores
                        ]
                    )
        except error.SDKError:
            raise exceptions.NotFoundError(
                "An error occurred while getting models/explores."
            )
        return explores

    def get_used_explores(
        self, *, model: Optional[str] = None, explore: str = ""
    ) -> Dict[str, int]:
        """Returns a dictionary with used explore names as keys and query count as
        values.
        """
        resp = self.sdk.run_inline_query(
            "json",
            models.WriteQuery(
                model="i__looker",
                view="history",
                fields=["query.view", "history.query_run_count"],
                filters={
                    "history.created_date": self.timeframe,
                    # Underscores are escaped because Looker filter syntax
                    # treats "_" as a single-character wildcard.
                    "query.model": model.replace("_", "^_") if model else "",
                    "history.query_run_count": ">0",
                    "query.view": explore,
                    "user.dev_mode": "No",
                },
                limit="5000",
            ),
        )
        _results: MutableSequence[Dict[str, int]] = json.loads(resp)
        results = {
            cast(str, r["query.view"]): r["history.query_run_count"] for r in _results
        }
        return results

    def get_unused_explores(self, model: str):
        """Returns a list of explores that do not meet the min query count requirement
        for the specified timeframe.
        """
        _all = self.get_explores(model=model)
        used = self.get_used_explores(model=model)
        # Keep only explores that satisfy the min_query requirement
        used = self._filter(data=used, condition=lambda x: x[1] >= self.min_queries)
        unused_explores = [e.name for e in _all if e.name not in used.keys()]
        return unused_explores

    def get_explore_fields(self, explore: models.LookmlModelExplore) -> Sequence[str]:
        """Return a list of non hidden fields for a given explore"""
        fields = explore.fields
        dimensions = [cast(str, f.name) for f in fields.dimensions if not f.hidden]  # type: ignore # noqa: B950
        measures = [cast(str, f.name) for f in fields.measures if not f.hidden]  # type: ignore # noqa B950
        result = sorted(list(set([*dimensions, *measures])))
        return result

    def get_used_explore_fields(
        self, *, model: str, explore: str = ""
    ) -> Dict[str, int]:
        """Returns a list of model.view scoped explore fields as well as the
        number of times they were used in the specified timeframe as value.
        Should always be called with either model, or model and explore
        """
        resp = self.sdk.run_inline_query(
            "json",
            models.WriteQuery(
                model="i__looker",
                view="history",
                fields=[
                    "query.model",
                    "query.view",
                    "query.formatted_fields",
                    "query.formatted_filters",
                    "history.query_run_count",
                ],
                filters={
                    "history.created_date": self.timeframe,
                    "query.model": model.replace("_", "^_"),
                    "query.view": explore.replace("_", "^_") if explore else "",
                    "query.formatted_fields": "-NULL",
                    "history.workspace_id": "production",
                },
                limit="5000",
            ),
        )
        data = json.loads(resp)
        used_fields: Dict[str, int] = {}
        for row in data:
            # (Dead assignments of row["query.model"]/row["query.view"] to
            # the model/explore parameters removed — they were never read.)
            fields = re.findall(r"(\w+\.\w+)", row["query.formatted_fields"])
            recorded = []
            for f in fields:
                if used_fields.get(f):
                    used_fields[f] += row["history.query_run_count"]
                else:
                    used_fields[f] = row["history.query_run_count"]
                recorded.append(f)
            # A field used as a filter in a query is not listed in
            # query.formatted_fields BUT if the field is used as both a filter
            # and a dimension/measure, it's listed in both query.formatted_fields
            # and query.formatted_filters. The recorded variable keeps track of
            # this, so that no double counting occurs.
            filters = row["query.formatted_filters"]
            if filters:
                parsed_filters = re.findall(r"(\w+\.\w+)+", filters)
                for f in parsed_filters:
                    if f in recorded:
                        continue
                    elif used_fields.get(f):
                        used_fields[f] += row["history.query_run_count"]
                    else:
                        used_fields[f] = row["history.query_run_count"]
        return used_fields

    def get_explore_field_stats(
        self, explore: models.LookmlModelExplore
    ) -> Dict[str, int]:
        """Return a dictionary with all exposed field names as keys and field query
        count as values.
        """
        assert isinstance(explore.model_name, str)
        assert isinstance(explore.name, str)
        all_fields = self.get_explore_fields(explore=explore)
        field_stats = self.get_used_explore_fields(
            model=explore.model_name, explore=explore.name
        )
        # Unused fields get an explicit zero count.
        for field in all_fields:
            if not field_stats.get(field):
                field_stats[field] = 0
        return field_stats

    def get_explore_join_stats(
        self, *, explore: models.LookmlModelExplore, field_stats: Dict[str, int]
    ) -> Dict[str, int]:
        """Returns dict containing stats about all joins in an explore."""
        assert isinstance(explore.scopes, MutableSequence)
        all_joins = explore.scopes
        all_joins.remove(explore.name)
        join_stats: Dict[str, int] = {}
        if all_joins:
            for field, query_count in field_stats.items():
                join = field.split(".")[0]  # Because all fields are view (join) scoped
                if join == explore.name:
                    continue
                elif join_stats.get(join):
                    join_stats[join] += query_count
                else:
                    join_stats[join] = query_count
            for join in all_joins:
                if not join_stats.get(join):
                    join_stats[join] = 0
        return join_stats

    def run_git_connection_tests(self, project_id: str):
        """Run all git connection tests for a given project."""
        # Git connection tests can only be run from the dev workspace.
        self.sdk.update_session(models.WriteApiSession(workspace_id="dev"))
        supported_tests = self.sdk.all_git_connection_tests(project_id)
        results = []
        for test in supported_tests:
            assert isinstance(test.id, str)
            resp = self.sdk.run_git_connection_test(project_id, test.id)
            results.append(resp)
            if resp.status != "pass":
                break
        self.sdk.update_session(models.WriteApiSession(workspace_id="production"))
        errors = list(filter(lambda r: r.status != "pass", results))
        formatted_results = [f"{r.id} ({r.status})" for r in results]
        return "\n".join(formatted_results) if errors else "OK"

    def _filter(
        self, data: Optional[Dict[str, int]], condition: Optional[Callable] = None
    ) -> Dict[str, int]:
        """Filters based on min_queries condition. By default, it returns rows that do
        not satisfy the min_queries requirement. "condition" can be passed to override
        this behavior.
        """
        result: Dict[str, int]
        if not data:
            result = dict()
        elif condition:
            result = dict(filter(condition, data.items()))
        else:
            result = dict(filter(lambda e: e[1] <= self.min_queries, data.items()))
        return result

    def _limit(
        self, data: Sequence[Dict[str, Union[int, str, bool]]]
    ) -> Sequence[Dict[str, Union[int, str, bool]]]:
        """Limits results printed on screen"""
        data = data[: self.limit] if self.limit else data
        return data

    def _sort(
        self, data: Sequence[Dict[str, Union[int, str, bool]]]
    ) -> Sequence[Dict[str, Union[int, str, bool]]]:
        """Sorts results as specified by user"""
        if self.sortkey:
            sort_key = self.sortkey[0]
            if sort_key not in data[0].keys():
                raise KeyError(f"Sort field {sort_key} not found.")
            sort_types = {"ASC": False, "DESC": True}
            if self.sortkey[1].upper() not in sort_types.keys():
                raise KeyError(f"Unrecognized sort type: {self.sortkey[1]}.")
            sort_type = sort_types[self.sortkey[1].upper()]
            data = sorted(data, key=itemgetter(sort_key), reverse=sort_type)
        return data

    def _save_to_file(self, data: Sequence[Dict[str, Union[int, str]]]):
        """Save results to a file with name {command}_date_time.csv"""
        date = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
        filename = f"{self.cmd}_{date}.csv"
        with open(filename, "w", newline="") as csvfile:
            # Replace "\n" which is required when printing, with ','
            data = list(
                map(
                    lambda x: {k: str(v).replace("\n", ",") for k, v in x.items()},
                    data,
                )
            )
            writer = csv.DictWriter(csvfile, fieldnames=data[0].keys())
            writer.writeheader()
            writer.writerows(data)

    def _tabularize_and_print(
        self, data: Sequence[Dict[str, Union[int, str, bool]]],
    ):
        """Prints data in tabular form."""
        if not data:
            print("\bNo results found.", end="\n" * 2)
        else:
            result = tabulate.tabulate(
                data, headers="keys", tablefmt="psql", numalign="center"
            )
            print(f"\b{result}", end="\n" * 2)

    def output(self, data: Sequence[Dict[str, Union[int, str, bool]]]):
        """Output generated results and/or save"""
        data = self._sort(data)
        data = self._limit(data)
        if self.save:
            self._save_to_file(data)
        if not self.quiet:
            self._tabularize_and_print(data)
class Input(NamedTuple):
    """Parsed command-line options handed to Fetcher."""
    command: str
    subcommand: Optional[str] = None
    project: Optional[str] = None
    model: Optional[str] = None
    explore: Optional[str] = None
    timeframe: Optional[int] = 90  # lookback window in days
    min_queries: Optional[int] = 0  # usage threshold applied by Fetcher._filter
    sortkey: Optional[Tuple[str, str]] = None  # (field, "ASC"|"DESC")
    limit: Optional[Sequence[int]] = None  # first element caps printed rows
    config_file: str = "looker.ini"
    section: str = "Looker"
    quiet: bool = False  # suppress table printing
    save: Optional[bool] = False  # write results to CSV
    timeout: Optional[int] = 120  # API timeout override in seconds
| |
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module maturity/support metadata consumed by Ansible tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'certified'}
DOCUMENTATION = """
---
module: ec2_elb_lb
description:
- Returns information about the load balancer.
- Will be marked changed when called only if state is changed.
short_description: Creates or destroys Amazon ELB.
version_added: "1.5"
author:
- "Jim Dalton (@jsdalton)"
options:
state:
description:
- Create or destroy the ELB
choices: ["present", "absent"]
required: true
name:
description:
- The name of the ELB
required: true
listeners:
description:
- List of ports/protocols for this ELB to listen on (see example)
required: false
purge_listeners:
description:
- Purge existing listeners on ELB that are not found in listeners
required: false
default: true
instance_ids:
description:
- List of instance ids to attach to this ELB
required: false
default: false
version_added: "2.1"
purge_instance_ids:
description:
- Purge existing instance ids on ELB that are not found in instance_ids
required: false
default: false
version_added: "2.1"
zones:
description:
- List of availability zones to enable on this ELB
required: false
purge_zones:
description:
- Purge existing availability zones on ELB that are not found in zones
required: false
default: false
security_group_ids:
description:
- A list of security groups to apply to the elb
required: false
default: None
version_added: "1.6"
security_group_names:
description:
- A list of security group names to apply to the elb
required: false
default: None
version_added: "2.0"
health_check:
description:
- An associative array of health check configuration settings (see example)
required: false
default: None
access_logs:
description:
- An associative array of access logs configuration settings (see example)
required: false
default: None
version_added: "2.0"
subnets:
description:
- A list of VPC subnets to use when creating ELB. Zones should be empty if using this.
required: false
default: None
aliases: []
version_added: "1.7"
purge_subnets:
description:
- Purge existing subnet on ELB that are not found in subnets
required: false
default: false
version_added: "1.7"
scheme:
description:
- The scheme to use when creating the ELB. For a private VPC-visible ELB use 'internal'.
If you choose to update your scheme with a different value the ELB will be destroyed and
recreated. To update scheme you must use the option wait.
choices: ["internal", "internet-facing"]
required: false
default: 'internet-facing'
version_added: "1.7"
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
aliases: []
version_added: "1.5"
connection_draining_timeout:
description:
- Wait a specified timeout allowing connections to drain before terminating an instance
required: false
aliases: []
version_added: "1.8"
idle_timeout:
description:
- ELB connections from clients and to servers are timed out after this amount of time
required: false
version_added: "2.0"
cross_az_load_balancing:
description:
- Distribute load across all configured Availability Zones
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
version_added: "1.8"
stickiness:
description:
- An associative array of stickiness policy settings. Policy will be applied to all listeners ( see example )
required: false
version_added: "2.0"
wait:
description:
- When specified, Ansible will check the status of the load balancer to ensure it has been successfully
removed from AWS.
required: false
default: no
choices: ["yes", "no"]
version_added: "2.1"
wait_timeout:
description:
- Used in conjunction with wait. Number of seconds to wait for the elb to be terminated.
A maximum of 600 seconds (10 minutes) is allowed.
required: false
default: 60
version_added: "2.1"
tags:
description:
- An associative array of tags. To delete all tags, supply an empty dict.
required: false
version_added: "2.1"
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic provisioning example (non-VPC)
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
proxy_protocol: True
- protocol: https
load_balancer_port: 443
instance_protocol: http # optional, defaults to value of protocol setting
instance_port: 80
# ssl certificate required for https or ssl
ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert"
# Internal ELB example
- local_action:
module: ec2_elb_lb
name: "test-vpc"
scheme: internal
state: present
instance_ids:
- i-abcd1234
purge_instance_ids: true
subnets:
- subnet-abcd1234
- subnet-1a2b3c4d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
# Configure a health check and the access logs
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
health_check:
ping_protocol: http # options are http, https, ssl, tcp
ping_port: 80
ping_path: "/index.html" # not required for tcp or ssl
response_timeout: 5 # seconds
interval: 30 # seconds
unhealthy_threshold: 2
healthy_threshold: 10
access_logs:
interval: 5 # minutes (defaults to 60)
s3_location: "my-bucket" # This value is required if access_logs is set
s3_prefix: "logs"
# Ensure ELB is gone
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: absent
# Ensure ELB is gone and wait for check (for default timeout)
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: absent
wait: yes
# Ensure ELB is gone and wait for check with timeout value
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: absent
wait: yes
wait_timeout: 600
# Normally, this module will purge any listeners that exist on the ELB
# but aren't specified in the listeners parameter. If purge_listeners is
# false it leaves them alone
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_listeners: no
# Normally, this module will leave availability zones that are enabled
# on the ELB alone. If purge_zones is true, then any extraneous zones
# will be removed
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_zones: yes
# Creates a ELB and assigns a list of subnets to it.
- local_action:
module: ec2_elb_lb
state: present
name: 'New ELB'
security_group_ids: 'sg-123456, sg-67890'
region: us-west-2
subnets: 'subnet-123456,subnet-67890'
purge_subnets: yes
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
# Create an ELB with connection draining, increased idle timeout and cross availability
# zone load balancing
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
connection_draining_timeout: 60
idle_timeout: 300
cross_az_load_balancing: "yes"
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
# Create an ELB with load balancer stickiness enabled
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
stickiness:
type: loadbalancer
enabled: yes
expiration: 300
# Create an ELB with application stickiness enabled
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
stickiness:
type: application
enabled: yes
cookie: SESSIONID
# Create an ELB and add tags
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
tags:
Name: "New ELB"
stack: "production"
client: "Bob"
# Delete all tags from an ELB
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
tags: {}
"""
import random
import time
import traceback
try:
import boto
import boto.ec2.elb
import boto.ec2.elb.attributes
import boto.vpc
from boto.ec2.elb.healthcheck import HealthCheck
from boto.ec2.tag import Tag
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, connect_to_aws, AnsibleAWSError, get_aws_connection_info
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_native
def _throttleable_operation(max_retries):
def _operation_wrapper(op):
def _do_op(*args, **kwargs):
retry = 0
while True:
try:
return op(*args, **kwargs)
except boto.exception.BotoServerError as e:
if retry < max_retries and e.code in \
("Throttling", "RequestLimitExceeded"):
retry = retry + 1
time.sleep(min(random.random() * (2 ** retry), 300))
continue
else:
raise
return _do_op
return _operation_wrapper
def _get_vpc_connection(module, region, aws_connect_params):
    """Open a boto VPC connection in *region*, failing the module on
    authentication/connection errors."""
    try:
        vpc_conn = connect_to_aws(boto.vpc, region, **aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))
    else:
        return vpc_conn
_THROTTLING_RETRIES = 5
class ElbManager(object):
"""Handles ELB creation and destruction"""
    def __init__(self, module, name, listeners=None, purge_listeners=None,
                 zones=None, purge_zones=None, security_group_ids=None,
                 health_check=None, subnets=None, purge_subnets=None,
                 scheme="internet-facing", connection_draining_timeout=None,
                 idle_timeout=None,
                 cross_az_load_balancing=None, access_logs=None,
                 stickiness=None, wait=None, wait_timeout=None, tags=None,
                 region=None,
                 instance_ids=None, purge_instance_ids=None, **aws_connect_params):
        """Capture the desired ELB configuration and open API connections.

        All keyword arguments mirror the module parameters; remaining
        **aws_connect_params are passed through to boto's connection
        helpers. Fails the module if the existing load balancers cannot
        be listed.
        """
        self.module = module
        self.name = name
        self.listeners = listeners
        self.purge_listeners = purge_listeners
        self.instance_ids = instance_ids
        self.purge_instance_ids = purge_instance_ids
        self.zones = zones
        self.purge_zones = purge_zones
        self.security_group_ids = security_group_ids
        self.health_check = health_check
        self.subnets = subnets
        self.purge_subnets = purge_subnets
        self.scheme = scheme
        self.connection_draining_timeout = connection_draining_timeout
        self.idle_timeout = idle_timeout
        self.cross_az_load_balancing = cross_az_load_balancing
        self.access_logs = access_logs
        self.stickiness = stickiness
        self.wait = wait
        self.wait_timeout = wait_timeout
        self.tags = tags

        self.aws_connect_params = aws_connect_params
        self.region = region

        # Mutated by the various _set_* helpers as changes are applied.
        self.changed = False
        self.status = 'gone'
        self.elb_conn = self._get_elb_connection()

        try:
            # None if no load balancer with self.name exists yet.
            self.elb = self._get_elb()
        except boto.exception.BotoServerError as e:
            module.fail_json(msg='unable to get all load balancers: %s' % e.message, exception=traceback.format_exc())

        self.ec2_conn = self._get_ec2_connection()
    @_throttleable_operation(_THROTTLING_RETRIES)
    def ensure_ok(self):
        """Create the ELB or converge an existing one to the desired state."""
        if not self.elb:
            # Zones and listeners will be added at creation
            self._create_elb()
        else:
            if self._get_scheme():
                # the only way to change the scheme is by recreating the resource
                self.ensure_gone()
                self._create_elb()
            else:
                self._set_zones()
                self._set_security_groups()
                self._set_elb_listeners()
                self._set_subnets()

        self._set_health_check()
        # boto has introduced support for some ELB attributes in
        # different versions, so we check first before trying to
        # set them to avoid errors
        if self._check_attribute_support('connection_draining'):
            self._set_connection_draining_timeout()
        if self._check_attribute_support('connecting_settings'):
            self._set_idle_timeout()
        if self._check_attribute_support('cross_zone_load_balancing'):
            self._set_cross_az_load_balancing()
        if self._check_attribute_support('access_log'):
            self._set_access_log()
        # add sticky options
        self.select_stickiness_policy()

        # ensure backend server policies are correct
        self._set_backend_policies()
        # set/remove instance ids
        self._set_instance_ids()

        self._set_tags()
def ensure_gone(self):
"""Destroy the ELB"""
if self.elb:
self._delete_elb()
if self.wait:
elb_removed = self._wait_for_elb_removed()
# Unfortunately even though the ELB itself is removed quickly
# the interfaces take longer so reliant security groups cannot
# be deleted until the interface has registered as removed.
elb_interface_removed = self._wait_for_elb_interface_removed()
if not (elb_removed and elb_interface_removed):
self.module.fail_json(msg='Timed out waiting for removal of load balancer.')
def get_info(self):
try:
check_elb = self.elb_conn.get_all_load_balancers(self.name)[0]
except:
check_elb = None
if not check_elb:
info = {
'name': self.name,
'status': self.status,
'region': self.region
}
else:
try:
lb_cookie_policy = check_elb.policies.lb_cookie_stickiness_policies[0].__dict__['policy_name']
except:
lb_cookie_policy = None
try:
app_cookie_policy = check_elb.policies.app_cookie_stickiness_policies[0].__dict__['policy_name']
except:
app_cookie_policy = None
info = {
'name': check_elb.name,
'dns_name': check_elb.dns_name,
'zones': check_elb.availability_zones,
'security_group_ids': check_elb.security_groups,
'status': self.status,
'subnets': self.subnets,
'scheme': check_elb.scheme,
'hosted_zone_name': check_elb.canonical_hosted_zone_name,
'hosted_zone_id': check_elb.canonical_hosted_zone_name_id,
'lb_cookie_policy': lb_cookie_policy,
'app_cookie_policy': app_cookie_policy,
'proxy_policy': self._get_proxy_protocol_policy(),
'backends': self._get_backend_policies(),
'instances': [instance.id for instance in check_elb.instances],
'out_of_service_count': 0,
'in_service_count': 0,
'unknown_instance_state_count': 0,
'region': self.region
}
# status of instances behind the ELB
if info['instances']:
info['instance_health'] = [ dict(
instance_id = instance_state.instance_id,
reason_code = instance_state.reason_code,
state = instance_state.state
) for instance_state in self.elb_conn.describe_instance_health(self.name)]
else:
info['instance_health'] = []
# instance state counts: InService or OutOfService
if info['instance_health']:
for instance_state in info['instance_health']:
if instance_state['state'] == "InService":
info['in_service_count'] += 1
elif instance_state['state'] == "OutOfService":
info['out_of_service_count'] += 1
else:
info['unknown_instance_state_count'] += 1
if check_elb.health_check:
info['health_check'] = {
'target': check_elb.health_check.target,
'interval': check_elb.health_check.interval,
'timeout': check_elb.health_check.timeout,
'healthy_threshold': check_elb.health_check.healthy_threshold,
'unhealthy_threshold': check_elb.health_check.unhealthy_threshold,
}
if check_elb.listeners:
info['listeners'] = [self._api_listener_as_tuple(l)
for l in check_elb.listeners]
elif self.status == 'created':
# When creating a new ELB, listeners don't show in the
# immediately returned result, so just include the
# ones that were added
info['listeners'] = [self._listener_as_tuple(l)
for l in self.listeners]
else:
info['listeners'] = []
if self._check_attribute_support('connection_draining'):
info['connection_draining_timeout'] = int(self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout)
if self._check_attribute_support('connecting_settings'):
info['idle_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectingSettings').idle_timeout
if self._check_attribute_support('cross_zone_load_balancing'):
is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing')
if is_cross_az_lb_enabled:
info['cross_az_load_balancing'] = 'yes'
else:
info['cross_az_load_balancing'] = 'no'
# return stickiness info?
info['tags'] = self.tags
return info
@_throttleable_operation(_THROTTLING_RETRIES)
def _wait_for_elb_removed(self):
polling_increment_secs = 15
max_retries = (self.wait_timeout // polling_increment_secs)
status_achieved = False
for x in range(0, max_retries):
try:
self.elb_conn.get_all_lb_attributes(self.name)
except (boto.exception.BotoServerError, Exception) as e:
if "LoadBalancerNotFound" in e.code:
status_achieved = True
break
else:
time.sleep(polling_increment_secs)
return status_achieved
    @_throttleable_operation(_THROTTLING_RETRIES)
    def _wait_for_elb_interface_removed(self):
        # Wait for the amazon-managed network interfaces attached to the
        # ELB to disappear, so that dependent security groups can be
        # deleted afterwards. Returns True once an interface lookup comes
        # back empty or raises InvalidNetworkInterfaceID.
        polling_increment_secs = 15
        max_retries = (self.wait_timeout // polling_increment_secs)
        status_achieved = False

        elb_interfaces = self.ec2_conn.get_all_network_interfaces(
            filters={'attachment.instance-owner-id': 'amazon-elb',
                     'description': 'ELB {0}'.format(self.name) })

        for x in range(0, max_retries):
            for interface in elb_interfaces:
                try:
                    result = self.ec2_conn.get_all_network_interfaces(interface.id)
                    if result == []:
                        # NOTE(review): this break only leaves the inner
                        # loop; the outer retry loop re-queries the same
                        # interfaces again — verify whether an outer break
                        # was intended.
                        status_achieved = True
                        break
                    else:
                        time.sleep(polling_increment_secs)
                # NOTE(review): `(BotoServerError, Exception)` is equivalent
                # to catching Exception; non-boto errors lack `.code` and
                # would raise AttributeError here — confirm intent.
                except (boto.exception.BotoServerError, Exception) as e:
                    if 'InvalidNetworkInterfaceID' in e.code:
                        status_achieved = True
                        break
                    else:
                        self.module.fail_json(msg=to_native(e), exception=traceback.format_exc())

        return status_achieved
@_throttleable_operation(_THROTTLING_RETRIES)
def _get_elb(self):
elbs = self.elb_conn.get_all_load_balancers()
for elb in elbs:
if self.name == elb.name:
self.status = 'ok'
return elb
def _get_elb_connection(self):
try:
return connect_to_aws(boto.ec2.elb, self.region,
**self.aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
self.module.fail_json(msg=str(e))
def _get_ec2_connection(self):
try:
return connect_to_aws(boto.ec2, self.region,
**self.aws_connect_params)
except (boto.exception.NoAuthHandlerFound, Exception) as e:
self.module.fail_json(msg=to_native(e), exception=traceback.format_exc())
@_throttleable_operation(_THROTTLING_RETRIES)
def _delete_elb(self):
# True if succeeds, exception raised if not
result = self.elb_conn.delete_load_balancer(name=self.name)
if result:
self.changed = True
self.status = 'deleted'
    def _create_elb(self):
        # Create the ELB with its initial zones, listeners, security groups
        # and subnets in a single API call.
        listeners = [self._listener_as_tuple(l) for l in self.listeners]
        self.elb = self.elb_conn.create_load_balancer(name=self.name,
                                                      zones=self.zones,
                                                      security_groups=self.security_group_ids,
                                                      complex_listeners=listeners,
                                                      subnets=self.subnets,
                                                      scheme=self.scheme)
        if self.elb:
            # HACK: Work around a boto bug in which the listeners attribute is
            # always set to the listeners argument to create_load_balancer, and
            # not the complex_listeners
            # We're not doing a self.elb = self._get_elb here because there
            # might be eventual consistency issues and it doesn't necessarily
            # make sense to wait until the ELB gets returned from the EC2 API.
            # This is necessary in the event we hit the throttling errors and
            # need to retry ensure_ok
            # See https://github.com/boto/boto/issues/3526
            self.elb.listeners = self.listeners
            self.changed = True
            self.status = 'created'
def _create_elb_listeners(self, listeners):
"""Takes a list of listener tuples and creates them"""
# True if succeeds, exception raised if not
self.changed = self.elb_conn.create_load_balancer_listeners(self.name,
complex_listeners=listeners)
def _delete_elb_listeners(self, listeners):
"""Takes a list of listener tuples and deletes them from the elb"""
ports = [l[0] for l in listeners]
# True if succeeds, exception raised if not
self.changed = self.elb_conn.delete_load_balancer_listeners(self.name,
ports)
    def _set_elb_listeners(self):
        """
        Creates listeners specified by self.listeners; overwrites existing
        listeners on these ports; removes extraneous listeners
        """
        # Three-way diff between desired and existing listeners, keyed by
        # the incoming (load balancer) port.
        listeners_to_add = []
        listeners_to_remove = []
        listeners_to_keep = []

        # Check for any listeners we need to create or overwrite
        for listener in self.listeners:
            listener_as_tuple = self._listener_as_tuple(listener)

            # First we loop through existing listeners to see if one is
            # already specified for this port
            existing_listener_found = None
            for existing_listener in self.elb.listeners:
                # Since ELB allows only one listener on each incoming port, a
                # single match on the incoming port is all we're looking for
                if existing_listener[0] == int(listener['load_balancer_port']):
                    existing_listener_found = self._api_listener_as_tuple(existing_listener)
                    break

            if existing_listener_found:
                # Does it match exactly?
                if listener_as_tuple != existing_listener_found:
                    # The ports are the same but something else is different,
                    # so we'll remove the existing one and add the new one
                    listeners_to_remove.append(existing_listener_found)
                    listeners_to_add.append(listener_as_tuple)
                else:
                    # We already have this listener, so we're going to keep it
                    listeners_to_keep.append(existing_listener_found)
            else:
                # We didn't find an existing listener, so just add the new one
                listeners_to_add.append(listener_as_tuple)

        # Check for any extraneous listeners we need to remove, if desired
        if self.purge_listeners:
            for existing_listener in self.elb.listeners:
                existing_listener_tuple = self._api_listener_as_tuple(existing_listener)
                if existing_listener_tuple in listeners_to_remove:
                    # Already queued for removal
                    continue
                if existing_listener_tuple in listeners_to_keep:
                    # Keep this one around
                    continue
                # Since we're not already removing it and we don't need to keep
                # it, let's get rid of it
                listeners_to_remove.append(existing_listener_tuple)

        # Removals must be issued before additions so a changed listener's
        # port is free before it is re-created.
        if listeners_to_remove:
            self._delete_elb_listeners(listeners_to_remove)

        if listeners_to_add:
            self._create_elb_listeners(listeners_to_add)
def _api_listener_as_tuple(self, listener):
"""Adds ssl_certificate_id to ELB API tuple if present"""
base_tuple = listener.get_complex_tuple()
if listener.ssl_certificate_id and len(base_tuple) < 5:
return base_tuple + (listener.ssl_certificate_id,)
return base_tuple
def _listener_as_tuple(self, listener):
"""Formats listener as a 4- or 5-tuples, in the order specified by the
ELB API"""
# N.B. string manipulations on protocols below (str(), upper()) is to
# ensure format matches output from ELB API
listener_list = [
int(listener['load_balancer_port']),
int(listener['instance_port']),
str(listener['protocol'].upper()),
]
# Instance protocol is not required by ELB API; it defaults to match
# load balancer protocol. We'll mimic that behavior here
if 'instance_protocol' in listener:
listener_list.append(str(listener['instance_protocol'].upper()))
else:
listener_list.append(str(listener['protocol'].upper()))
if 'ssl_certificate_id' in listener:
listener_list.append(str(listener['ssl_certificate_id']))
return tuple(listener_list)
def _enable_zones(self, zones):
try:
self.elb.enable_zones(zones)
except boto.exception.BotoServerError as e:
self.module.fail_json(msg='unable to enable zones: %s' % e.message, exception=traceback.format_exc())
self.changed = True
def _disable_zones(self, zones):
try:
self.elb.disable_zones(zones)
except boto.exception.BotoServerError as e:
self.module.fail_json(msg='unable to disable zones: %s' % e.message, exception=traceback.format_exc())
self.changed = True
def _attach_subnets(self, subnets):
self.elb_conn.attach_lb_to_subnets(self.name, subnets)
self.changed = True
def _detach_subnets(self, subnets):
self.elb_conn.detach_lb_from_subnets(self.name, subnets)
self.changed = True
def _set_subnets(self):
"""Determine which subnets need to be attached or detached on the ELB"""
if self.subnets:
if self.purge_subnets:
subnets_to_detach = list(set(self.elb.subnets) - set(self.subnets))
subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
else:
subnets_to_detach = None
subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
if subnets_to_attach:
self._attach_subnets(subnets_to_attach)
if subnets_to_detach:
self._detach_subnets(subnets_to_detach)
def _get_scheme(self):
"""Determine if the current scheme is different than the scheme of the ELB"""
if self.scheme:
if self.elb.scheme != self.scheme:
if not self.wait:
self.module.fail_json(msg="Unable to modify scheme without using the wait option")
return True
return False
def _set_zones(self):
"""Determine which zones need to be enabled or disabled on the ELB"""
if self.zones:
if self.purge_zones:
zones_to_disable = list(set(self.elb.availability_zones) -
set(self.zones))
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
else:
zones_to_disable = None
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
if zones_to_enable:
self._enable_zones(zones_to_enable)
# N.B. This must come second, in case it would have removed all zones
if zones_to_disable:
self._disable_zones(zones_to_disable)
def _set_security_groups(self):
if self.security_group_ids is not None and set(self.elb.security_groups) != set(self.security_group_ids):
self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids)
self.changed = True
    def _set_health_check(self):
        """Set health check values on ELB as needed"""
        if self.health_check:
            # This just makes it easier to compare each of the attributes
            # and look for changes. Keys are attributes of the current
            # health_check; values are desired values of new health_check
            health_check_config = {
                "target": self._get_health_check_target(),
                "timeout": self.health_check['response_timeout'],
                "interval": self.health_check['interval'],
                "unhealthy_threshold": self.health_check['unhealthy_threshold'],
                "healthy_threshold": self.health_check['healthy_threshold'],
            }

            update_health_check = False

            # The health_check attribute is *not* set on newly created
            # ELBs! So we have to create our own.
            if not self.elb.health_check:
                self.elb.health_check = HealthCheck()

            # Copy each differing attribute onto the live object and only
            # issue the API call if anything actually changed.
            for attr, desired_value in health_check_config.items():
                if getattr(self.elb.health_check, attr) != desired_value:
                    setattr(self.elb.health_check, attr, desired_value)
                    update_health_check = True

            if update_health_check:
                self.elb.configure_health_check(self.elb.health_check)
                self.changed = True
def _check_attribute_support(self, attr):
return hasattr(boto.ec2.elb.attributes.LbAttributes(), attr)
def _set_cross_az_load_balancing(self):
attributes = self.elb.get_attributes()
if self.cross_az_load_balancing:
if not attributes.cross_zone_load_balancing.enabled:
self.changed = True
attributes.cross_zone_load_balancing.enabled = True
else:
if attributes.cross_zone_load_balancing.enabled:
self.changed = True
attributes.cross_zone_load_balancing.enabled = False
self.elb_conn.modify_lb_attribute(self.name, 'CrossZoneLoadBalancing',
attributes.cross_zone_load_balancing.enabled)
    def _set_access_log(self):
        # Enable/update S3 access logging when requested, or disable it
        # when the module no longer asks for it.
        attributes = self.elb.get_attributes()
        if self.access_logs:
            if 's3_location' not in self.access_logs:
                self.module.fail_json(msg='s3_location information required')

            # Desired AccessLog attribute values; prefix and interval fall
            # back to '' and 60 when not supplied.
            access_logs_config = {
                "enabled": True,
                "s3_bucket_name": self.access_logs['s3_location'],
                "s3_bucket_prefix": self.access_logs.get('s3_prefix', ''),
                "emit_interval": self.access_logs.get('interval', 60),
            }

            update_access_logs_config = False
            for attr, desired_value in access_logs_config.items():
                if getattr(attributes.access_log, attr) != desired_value:
                    setattr(attributes.access_log, attr, desired_value)
                    update_access_logs_config = True
            if update_access_logs_config:
                self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
                self.changed = True
        elif attributes.access_log.enabled:
            # Logging currently on but not requested: turn it off.
            attributes.access_log.enabled = False
            self.changed = True
            self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
    def _set_connection_draining_timeout(self):
        # Enable connection draining with the requested timeout, or disable
        # it entirely when no timeout was requested.
        attributes = self.elb.get_attributes()
        if self.connection_draining_timeout is not None:
            if not attributes.connection_draining.enabled or \
                    attributes.connection_draining.timeout != self.connection_draining_timeout:
                self.changed = True
            attributes.connection_draining.enabled = True
            attributes.connection_draining.timeout = self.connection_draining_timeout
            # The modify call is issued even when nothing changed.
            self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
        else:
            if attributes.connection_draining.enabled:
                self.changed = True
            attributes.connection_draining.enabled = False
            self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
def _set_idle_timeout(self):
attributes = self.elb.get_attributes()
if self.idle_timeout is not None:
if attributes.connecting_settings.idle_timeout != self.idle_timeout:
self.changed = True
attributes.connecting_settings.idle_timeout = self.idle_timeout
self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings)
def _policy_name(self, policy_type):
return __file__.split('/')[-1].split('.')[0].replace('_', '-') + '-' + policy_type
def _create_policy(self, policy_param, policy_meth, policy):
getattr(self.elb_conn, policy_meth )(policy_param, self.elb.name, policy)
def _delete_policy(self, elb_name, policy):
self.elb_conn.delete_lb_policy(elb_name, policy)
def _update_policy(self, policy_param, policy_meth, policy_attr, policy):
self._delete_policy(self.elb.name, policy)
self._create_policy(policy_param, policy_meth, policy)
def _set_listener_policy(self, listeners_dict, policy=None):
policy = [] if policy is None else policy
for listener_port in listeners_dict:
if listeners_dict[listener_port].startswith('HTTP'):
self.elb_conn.set_lb_policies_of_listener(self.elb.name, listener_port, policy)
    def _set_stickiness_policy(self, elb_info, listeners_dict, policy, **policy_attrs):
        # Create the stickiness policy if absent, or recreate it when its
        # parameter (cookie name / expiration) changed, then (re)apply it
        # to the HTTP(S) listeners.
        for p in getattr(elb_info.policies, policy_attrs['attr']):
            if str(p.__dict__['policy_name']) == str(policy[0]):
                # Policy exists: compare its stored parameter against the
                # desired one (None is normalized to 0 for comparison).
                if str(p.__dict__[policy_attrs['dict_key']]) != str(policy_attrs['param_value'] or 0):
                    # Detach first, then delete+recreate with the new value.
                    self._set_listener_policy(listeners_dict)
                    self._update_policy(policy_attrs['param_value'], policy_attrs['method'], policy_attrs['attr'], policy[0])
                    self.changed = True
                break
        else:
            # for/else: no existing policy matched, so create a fresh one.
            self._create_policy(policy_attrs['param_value'], policy_attrs['method'], policy[0])
            self.changed = True

        self._set_listener_policy(listeners_dict, policy)
    def select_stickiness_policy(self):
        # Converge load-balancer or application cookie stickiness to the
        # requested state; with no stickiness request this is a no-op.
        if self.stickiness:
            # 'cookie' belongs to application stickiness, 'expiration' to
            # load-balancer stickiness; they are mutually exclusive.
            if 'cookie' in self.stickiness and 'expiration' in self.stickiness:
                self.module.fail_json(msg='\'cookie\' and \'expiration\' can not be set at the same time')

            elb_info = self.elb_conn.get_all_load_balancers(self.elb.name)[0]
            # Map of listener port -> protocol, used to pick the HTTP(S)
            # listeners the policy applies to.
            d = {}
            for listener in elb_info.listeners:
                d[listener[0]] = listener[2]
            listeners_dict = d

            if self.stickiness['type'] == 'loadbalancer':
                policy = []
                policy_type = 'LBCookieStickinessPolicyType'

                if self.module.boolean(self.stickiness['enabled']):

                    if 'expiration' not in self.stickiness:
                        self.module.fail_json(msg='expiration must be set when type is loadbalancer')
                    try:
                        # Expiration of 0 means "session scoped", which the
                        # API expects as an absent (None) value.
                        expiration = self.stickiness['expiration'] if int(self.stickiness['expiration']) else None
                    except ValueError:
                        self.module.fail_json(msg='expiration must be set to an integer')

                    policy_attrs = {
                        'type': policy_type,
                        'attr': 'lb_cookie_stickiness_policies',
                        'method': 'create_lb_cookie_stickiness_policy',
                        'dict_key': 'cookie_expiration_period',
                        'param_value': expiration
                    }
                    policy.append(self._policy_name(policy_attrs['type']))

                    self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
                elif not self.module.boolean(self.stickiness['enabled']):
                    # Disabled: remove our policy if it is the one attached.
                    if len(elb_info.policies.lb_cookie_stickiness_policies):
                        if elb_info.policies.lb_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
                            self.changed = True
                        else:
                            self.changed = False
                        self._set_listener_policy(listeners_dict)
                        self._delete_policy(self.elb.name, self._policy_name(policy_type))

            elif self.stickiness['type'] == 'application':
                policy = []
                policy_type = 'AppCookieStickinessPolicyType'
                if self.module.boolean(self.stickiness['enabled']):

                    if 'cookie' not in self.stickiness:
                        self.module.fail_json(msg='cookie must be set when type is application')

                    policy_attrs = {
                        'type': policy_type,
                        'attr': 'app_cookie_stickiness_policies',
                        'method': 'create_app_cookie_stickiness_policy',
                        'dict_key': 'cookie_name',
                        'param_value': self.stickiness['cookie']
                    }
                    policy.append(self._policy_name(policy_attrs['type']))
                    self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
                elif not self.module.boolean(self.stickiness['enabled']):
                    # Disabled: remove our policy if it is the one attached.
                    if len(elb_info.policies.app_cookie_stickiness_policies):
                        if elb_info.policies.app_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
                            self.changed = True
                        self._set_listener_policy(listeners_dict)
                        self._delete_policy(self.elb.name, self._policy_name(policy_type))
            else:
                # Unknown stickiness type: just clear listener policies.
                self._set_listener_policy(listeners_dict)
def _get_backend_policies(self):
"""Get a list of backend policies"""
policies = []
if self.elb.backends is not None:
for backend in self.elb.backends:
if backend.policies is not None:
for policy in backend.policies:
policies.append(str(backend.instance_port) + ':' + policy.policy_name)
return policies
    def _set_backend_policies(self):
        """Sets policies for all backends"""
        # Diff the proxy-protocol policy assignments per instance port.
        ensure_proxy_protocol = False
        replace = []
        backend_policies = self._get_backend_policies()

        # Find out what needs to be changed
        for listener in self.listeners:
            want = False

            if 'proxy_protocol' in listener and listener['proxy_protocol']:
                ensure_proxy_protocol = True
                want = True

            if str(listener['instance_port']) + ':ProxyProtocol-policy' in backend_policies:
                if not want:
                    # Policy attached but no longer wanted: clear it.
                    replace.append({'port': listener['instance_port'], 'policies': []})
            elif want:
                # Policy wanted but not attached yet.
                replace.append({'port': listener['instance_port'], 'policies': ['ProxyProtocol-policy']})

        # enable or disable proxy protocol
        if ensure_proxy_protocol:
            self._set_proxy_protocol_policy()

        # Make the backend policies so
        for item in replace:
            self.elb_conn.set_lb_policies_of_backend_server(self.elb.name, item['port'], item['policies'])
            self.changed = True
def _get_proxy_protocol_policy(self):
"""Find out if the elb has a proxy protocol enabled"""
if self.elb.policies is not None and self.elb.policies.other_policies is not None:
for policy in self.elb.policies.other_policies:
if policy.policy_name == 'ProxyProtocol-policy':
return policy.policy_name
return None
def _set_proxy_protocol_policy(self):
"""Install a proxy protocol policy if needed"""
proxy_policy = self._get_proxy_protocol_policy()
if proxy_policy is None:
self.elb_conn.create_lb_policy(
self.elb.name, 'ProxyProtocol-policy', 'ProxyProtocolPolicyType', {'ProxyProtocol': True}
)
self.changed = True
# TODO: remove proxy protocol policy if not needed anymore? There is no side effect to leaving it there
def _diff_list(self, a, b):
"""Find the entries in list a that are not in list b"""
b = set(b)
return [aa for aa in a if aa not in b]
def _get_instance_ids(self):
"""Get the current list of instance ids installed in the elb"""
instances = []
if self.elb.instances is not None:
for instance in self.elb.instances:
instances.append(instance.id)
return instances
def _set_instance_ids(self):
"""Register or deregister instances from an lb instance"""
assert_instances = self.instance_ids or []
has_instances = self._get_instance_ids()
add_instances = self._diff_list(assert_instances, has_instances)
if add_instances:
self.elb_conn.register_instances(self.elb.name, add_instances)
self.changed = True
if self.purge_instance_ids:
remove_instances = self._diff_list(has_instances, assert_instances)
if remove_instances:
self.elb_conn.deregister_instances(self.elb.name, remove_instances)
self.changed = True
    def _set_tags(self):
        """Add/Delete tags"""
        if self.tags is None:
            return

        params = {'LoadBalancerNames.member.1': self.name}

        tagdict = dict()

        # get the current list of tags from the ELB, if ELB exists
        if self.elb:
            current_tags = self.elb_conn.get_list('DescribeTags', params,
                                                  [('member', Tag)])
            tagdict = dict((tag.Key, tag.Value) for tag in current_tags
                           if hasattr(tag, 'Key'))

        # Add missing tags
        dictact = dict(set(self.tags.items()) - set(tagdict.items()))
        if dictact:
            for i, key in enumerate(dictact):
                # AddTags uses 1-based member indices in the query params.
                params['Tags.member.%d.Key' % (i + 1)] = key
                params['Tags.member.%d.Value' % (i + 1)] = dictact[key]

            self.elb_conn.make_request('AddTags', params)
            self.changed=True

        # Remove extra tags
        # (params still carries the AddTags keys; RemoveTags only reads
        # the Tags.member.N.Key entries it overwrites below.)
        dictact = dict(set(tagdict.items()) - set(self.tags.items()))
        if dictact:
            for i, key in enumerate(dictact):
                params['Tags.member.%d.Key' % (i + 1)] = key

            self.elb_conn.make_request('RemoveTags', params)
            self.changed=True
def _get_health_check_target(self):
"""Compose target string from healthcheck parameters"""
protocol = self.health_check['ping_protocol'].upper()
path = ""
if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check:
path = self.health_check['ping_path']
return "%s:%s%s" % (protocol, self.health_check['ping_port'], path)
def main():
    """Module entry point: parse arguments and converge the ELB state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state={'required': True, 'choices': ['present', 'absent']},
        name={'required': True},
        listeners={'default': None, 'required': False, 'type': 'list'},
        purge_listeners={'default': True, 'required': False, 'type': 'bool'},
        instance_ids={'default': None, 'required': False, 'type': 'list'},
        purge_instance_ids={'default': False, 'required': False, 'type': 'bool'},
        zones={'default': None, 'required': False, 'type': 'list'},
        purge_zones={'default': False, 'required': False, 'type': 'bool'},
        security_group_ids={'default': None, 'required': False, 'type': 'list'},
        security_group_names={'default': None, 'required': False, 'type': 'list'},
        health_check={'default': None, 'required': False, 'type': 'dict'},
        subnets={'default': None, 'required': False, 'type': 'list'},
        purge_subnets={'default': False, 'required': False, 'type': 'bool'},
        scheme={'default': 'internet-facing', 'required': False, 'choices': ['internal', 'internet-facing']},
        connection_draining_timeout={'default': None, 'required': False, 'type': 'int'},
        idle_timeout={'default': None, 'type': 'int', 'required': False},
        cross_az_load_balancing={'default': None, 'type': 'bool', 'required': False},
        stickiness={'default': None, 'required': False, 'type': 'dict'},
        access_logs={'default': None, 'required': False, 'type': 'dict'},
        wait={'default': False, 'type': 'bool', 'required': False},
        wait_timeout={'default': 60, 'type': 'int', 'required': False},
        tags={'default': None, 'required': False, 'type': 'dict'}
    )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive = [['security_group_ids', 'security_group_names']]
    )

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")

    # Unpack module parameters into locals for readability below.
    name = module.params['name']
    state = module.params['state']
    listeners = module.params['listeners']
    purge_listeners = module.params['purge_listeners']
    instance_ids = module.params['instance_ids']
    purge_instance_ids = module.params['purge_instance_ids']
    zones = module.params['zones']
    purge_zones = module.params['purge_zones']
    security_group_ids = module.params['security_group_ids']
    security_group_names = module.params['security_group_names']
    health_check = module.params['health_check']
    access_logs = module.params['access_logs']
    subnets = module.params['subnets']
    purge_subnets = module.params['purge_subnets']
    scheme = module.params['scheme']
    connection_draining_timeout = module.params['connection_draining_timeout']
    idle_timeout = module.params['idle_timeout']
    cross_az_load_balancing = module.params['cross_az_load_balancing']
    stickiness = module.params['stickiness']
    wait = module.params['wait']
    wait_timeout = module.params['wait_timeout']
    tags = module.params['tags']

    # Cross-parameter validation that argument_spec cannot express.
    if state == 'present' and not listeners:
        module.fail_json(msg="At least one listener is required for ELB creation")

    if state == 'present' and not (zones or subnets):
        module.fail_json(msg="At least one availability zone or subnet is required for ELB creation")

    if wait_timeout > 600:
        module.fail_json(msg='wait_timeout maximum is 600 seconds')

    # Resolve security group names to ids (names are only unique per VPC,
    # so filter by the subnets' VPC when one is given).
    if security_group_names:
        security_group_ids = []
        try:
            ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
            if subnets: # We have at least one subnet, ergo this is a VPC
                vpc_conn = _get_vpc_connection(module=module, region=region, aws_connect_params=aws_connect_params)
                vpc_id = vpc_conn.get_all_subnets([subnets[0]])[0].vpc_id
                filters = {'vpc_id': vpc_id}
            else:
                filters = None
            grp_details = ec2.get_all_security_groups(filters=filters)

            for group_name in security_group_names:
                if isinstance(group_name, string_types):
                    group_name = [group_name]

                group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
                security_group_ids.extend(group_id)
        except boto.exception.NoAuthHandlerFound as e:
            module.fail_json(msg = str(e))

    elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
                         purge_zones, security_group_ids, health_check,
                         subnets, purge_subnets, scheme,
                         connection_draining_timeout, idle_timeout,
                         cross_az_load_balancing,
                         access_logs, stickiness, wait, wait_timeout, tags,
                         region=region, instance_ids=instance_ids, purge_instance_ids=purge_instance_ids,
                         **aws_connect_params)

    # check for unsupported attributes for this version of boto
    if cross_az_load_balancing and not elb_man._check_attribute_support('cross_zone_load_balancing'):
        module.fail_json(msg="You must install boto >= 2.18.0 to use the cross_az_load_balancing attribute")

    if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'):
        module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute")

    if idle_timeout and not elb_man._check_attribute_support('connecting_settings'):
        module.fail_json(msg="You must install boto >= 2.33.0 to use the idle_timeout attribute")

    if state == 'present':
        elb_man.ensure_ok()
    elif state == 'absent':
        elb_man.ensure_gone()

    ansible_facts = {'ec2_elb': 'info'}
    ec2_facts_result = dict(changed=elb_man.changed,
                            elb=elb_man.get_info(),
                            ansible_facts=ansible_facts)

    module.exit_json(**ec2_facts_result)
# Ansible executes modules as scripts, so dispatch to main() directly.
if __name__ == '__main__':
    main()
| |
# -*- coding: utf-8; -*-
import re
import os.path
import inspect
import subprocess
import platform
import jinja2
import shlex
from jinja2.runtime import StrictUndefined
import ino.filters
from ino.commands.base import Command
from ino.environment import Version
from ino.filters import colorize
from ino.utils import SpaceList, list_subdirs
from ino.exc import Abort
class Build(Command):
    """
    Build a project in the current directory and produce a ready-to-upload
    firmware file.

    The project is expected to have a `src' subdirectory where all its sources
    are located. This directory is scanned recursively to find
    *.[c|cpp|pde|ino] files. They are compiled and linked into resulting
    firmware hex-file.

    Also any external library dependencies are tracked automatically. If a
    source file includes any library found among standard Arduino libraries or
    a library placed in `lib' subdirectory of the project, the library gets
    built too.

    Build artifacts are placed in `.build' subdirectory of the project.
    """

    name = 'build'
    help_line = "Build firmware from the current directory project"

    # Default toolchain binaries and flags; every one of them can be
    # overridden from the command line (see setup_arg_parser below).
    default_make = 'make'
    default_cc = 'avr-gcc'
    default_cxx = 'avr-g++'
    default_ar = 'avr-ar'
    default_objcopy = 'avr-objcopy'
    default_cppflags = '-ffunction-sections -fdata-sections -g -Os -w'
    default_cflags = ''
    default_cxxflags = '-fno-exceptions'
    default_ldflags = '-Os --gc-sections'

    def setup_arg_parser(self, parser):
        """Register all build-related command line options on `parser`."""
        super(Build, self).setup_arg_parser(parser)
        self.e.add_board_model_arg(parser)
        self.e.add_arduino_dist_arg(parser)

        parser.add_argument('--make', metavar='MAKE',
                            default=self.default_make,
                            help='Specifies the make tool to use. If '
                            'a full path is not given, searches in Arduino '
                            'directories before PATH. Default: "%(default)s".')

        parser.add_argument('--cc', metavar='COMPILER',
                            default=self.default_cc,
                            help='Specifies the compiler used for C files. If '
                            'a full path is not given, searches in Arduino '
                            'directories before PATH. Default: "%(default)s".')

        parser.add_argument('--cxx', metavar='COMPILER',
                            default=self.default_cxx,
                            help='Specifies the compiler used for C++ files. '
                            'If a full path is not given, searches in Arduino '
                            'directories before PATH. Default: "%(default)s".')

        parser.add_argument('--ar', metavar='AR',
                            default=self.default_ar,
                            help='Specifies the AR tool to use. If a full path '
                            'is not given, searches in Arduino directories '
                            'before PATH. Default: "%(default)s".')

        parser.add_argument('--objcopy', metavar='OBJCOPY',
                            default=self.default_objcopy,
                            help='Specifies the OBJCOPY to use. If a full path '
                            'is not given, searches in Arduino directories '
                            'before PATH. Default: "%(default)s".')

        parser.add_argument('-f', '--cppflags', metavar='FLAGS',
                            default=self.default_cppflags,
                            help='Flags that will be passed to the compiler. '
                            'Note that multiple (space-separated) flags must '
                            'be surrounded by quotes, e.g. '
                            '`--cppflags="-DC1 -DC2"\' specifies flags to define '
                            'the constants C1 and C2. Default: "%(default)s".')

        parser.add_argument('--cflags', metavar='FLAGS',
                            default=self.default_cflags,
                            help='Like --cppflags, but the flags specified are '
                            'only passed to compilations of C source files. '
                            'Default: "%(default)s".')

        parser.add_argument('--cxxflags', metavar='FLAGS',
                            default=self.default_cxxflags,
                            help='Like --cppflags, but the flags specified '
                            'are only passed to compilations of C++ source '
                            'files. Default: "%(default)s".')

        parser.add_argument('--ldflags', metavar='FLAGS',
                            default=self.default_ldflags,
                            help='Like --cppflags, but the flags specified '
                            'are only passed during the linking stage. Note '
                            'these flags should be specified as if `ld\' were '
                            'being invoked directly (i.e. the `-Wl,\' prefix '
                            'should be omitted). Default: "%(default)s".')

        parser.add_argument('-v', '--verbose', default=False, action='store_true',
                            help='Verbose make output')

    def discover(self, args):
        """Locate the Arduino core, variants, standard libraries and the
        toolchain binaries, recording all of them in the environment."""
        board = self.e.board_model(args.board_model)

        core_place = os.path.join(board['_coredir'], 'cores', board['build']['core'])
        # Arduino 1.0 renamed the main header from WProgram.h to Arduino.h
        core_header = 'Arduino.h' if self.e.arduino_lib_version.major else 'WProgram.h'
        self.e.find_dir('arduino_core_dir', [core_header], [core_place],
                        human_name='Arduino core library')

        if self.e.arduino_lib_version.major:
            # variants/ only exists from Arduino 1.0 on
            variants_place = os.path.join(board['_coredir'], 'variants')
            self.e.find_dir('arduino_variants_dir', ['.'], [variants_place],
                            human_name='Arduino variants directory')

        self.e.find_arduino_dir('arduino_libraries_dir', ['libraries'],
                                human_name='Arduino standard libraries')

        toolset = [
            ('make', args.make),
            ('cc', args.cc),
            ('cxx', args.cxx),
            ('ar', args.ar),
            ('objcopy', args.objcopy),
        ]

        for tool_key, tool_binary in toolset:
            self.e.find_arduino_tool(
                tool_key, ['hardware', 'tools', 'avr', 'bin'],
                items=[tool_binary], human_name=tool_binary)

    def setup_flags(self, args):
        """Populate the environment with compiler/linker flag lists."""
        board = self.e.board_model(args.board_model)
        mcu = '-mmcu=' + board['build']['mcu']

        # Hard-code the flags that are essential to building the sketch
        self.e['cppflags'] = SpaceList([
            mcu,
            '-DF_CPU=' + board['build']['f_cpu'],
            '-DARDUINO=' + str(self.e.arduino_lib_version.as_int()),
            '-I' + self.e['arduino_core_dir'],
        ])
        # Add additional flags as specified
        self.e['cppflags'] += SpaceList(shlex.split(args.cppflags))

        # USB vendor/product ids are only present for USB-capable boards
        if 'vid' in board['build']:
            self.e['cppflags'].append('-DUSB_VID=%s' % board['build']['vid'])
        if 'pid' in board['build']:
            self.e['cppflags'].append('-DUSB_PID=%s' % board['build']['pid'])

        if self.e.arduino_lib_version.major:
            variant_dir = os.path.join(self.e.arduino_variants_dir,
                                       board['build']['variant'])
            self.e.cppflags.append('-I' + variant_dir)

        self.e['cflags'] = SpaceList(shlex.split(args.cflags))
        self.e['cxxflags'] = SpaceList(shlex.split(args.cxxflags))

        # Again, hard-code the flags that are essential to building the sketch
        self.e['ldflags'] = SpaceList([mcu])
        self.e['ldflags'] += SpaceList([
            # flags are passed through the compiler driver, so prefix for ld
            '-Wl,' + flag for flag in shlex.split(args.ldflags)
        ])

        # filename templates used by the generated Makefiles
        self.e['names'] = {
            'obj': '%s.o',
            'lib': 'lib%s.a',
            'cpp': '%s.cpp',
            'deps': '%s.d',
        }

    def create_jinja(self, verbose):
        """Create the Jinja2 environment used to render Makefile templates."""
        templates_dir = os.path.join(os.path.dirname(__file__), '..', 'make')
        self.jenv = jinja2.Environment(
            loader=jinja2.FileSystemLoader(templates_dir),
            undefined=StrictUndefined,  # bark on Undefined render
            extensions=['jinja2.ext.do'])

        # inject @filters from ino.filters
        for name, f in inspect.getmembers(ino.filters, lambda x: getattr(x, 'filter', False)):
            self.jenv.filters[name] = f

        # inject globals
        self.jenv.globals['e'] = self.e
        # '@' silences command echoing in make unless --verbose was given
        self.jenv.globals['v'] = '' if verbose else '@'
        self.jenv.globals['slash'] = os.path.sep
        self.jenv.globals['SpaceList'] = SpaceList

    def render_template(self, source, target, **ctx):
        """Render template `source` into build_dir/`target`; return its path."""
        template = self.jenv.get_template(source)
        contents = template.render(**ctx)
        out_path = os.path.join(self.e.build_dir, target)
        with open(out_path, 'wt') as f:
            f.write(contents)

        return out_path

    def make(self, makefile, **kwargs):
        """Render `makefile`.jinja and run its `all` target with make.

        Raises Abort if make exits with a non-zero status.
        """
        makefile = self.render_template(makefile + '.jinja', makefile, **kwargs)
        ret = subprocess.call([self.e.make, '-f', makefile, 'all'])
        if ret != 0:
            raise Abort("Make failed with code %s" % ret)

    def recursive_inc_lib_flags(self, libdirs):
        """Return -I flags for every libdir and all its subdirectories
        (excluding conventional `examples' and `src' directories)."""
        flags = SpaceList()
        for d in libdirs:
            flags.append('-I' + d)
            flags.extend('-I' + subd for subd in list_subdirs(d, recursive=True, exclude=['examples', 'src']))
        return flags

    def _scan_dependencies(self, source_dir, lib_dirs, inc_flags):
        """Run a make-driven dependency scan over `source_dir`.

        Returns the set of library directories (from `lib_dirs`) that the
        sources in `source_dir` depend on.
        """
        output_filepath = os.path.join(self.e.build_dir, os.path.basename(source_dir), 'dependencies.d')
        self.make('Makefile.deps', inc_flags=inc_flags, src_dir=source_dir, output_filepath=output_filepath)
        self.e['deps'].append(output_filepath)

        # search for dependencies on libraries
        # for this scan dependency file generated by make
        # with regexes to find entries that start with
        # libraries dirname
        # NOTE: the whole path is escaped so regex metacharacters in
        # directory names (e.g. dots) are matched literally
        regexes = dict((lib, re.compile(r'\s' + re.escape(lib + os.path.sep))) for lib in lib_dirs)
        used_libs = set()
        with open(output_filepath) as f:
            for line in f:
                # .items() instead of Python 2's .iteritems() so this also
                # works under Python 3
                for lib, regex in regexes.items():
                    if regex.search(line) and lib != source_dir:
                        used_libs.add(lib)

        return used_libs

    def scan_dependencies(self):
        """Discover every library the project uses, ordered so that if lib A
        depends on lib B, A appears before B for correct linking."""
        self.e['deps'] = SpaceList()

        lib_dirs = [self.e.arduino_core_dir] + list_subdirs(self.e.lib_dir) + list_subdirs(self.e.arduino_libraries_dir)
        inc_flags = self.recursive_inc_lib_flags(lib_dirs)

        # If lib A depends on lib B it has to appear before B in the final
        # list so that the linker can link everything together correctly,
        # but the order of `_scan_dependencies` results is not defined, so...

        # 1. Get dependencies of sources in arbitrary order
        used_libs = list(self._scan_dependencies(self.e.src_dir, lib_dirs, inc_flags))

        # 2. Get dependencies of dependency libs themselves: existing
        # dependencies are moved to the end of the list maintaining order,
        # new dependencies are appended
        scanned_libs = set()
        while scanned_libs != set(used_libs):
            for lib in set(used_libs) - scanned_libs:
                dep_libs = self._scan_dependencies(lib, lib_dirs, inc_flags)

                i = 0
                for ulib in used_libs[:]:
                    if ulib in dep_libs:
                        # dependency lib used already, move it to the tail
                        used_libs.append(used_libs.pop(i))
                        dep_libs.remove(ulib)
                    else:
                        i += 1

                # append new dependencies to the tail
                used_libs.extend(dep_libs)
                scanned_libs.add(lib)

        self.e['used_libs'] = used_libs
        self.e['cppflags'].extend(self.recursive_inc_lib_flags(used_libs))

    def run(self, args):
        """Entry point: discover tools, set up flags and drive the build."""
        self.discover(args)
        self.setup_flags(args)
        self.create_jinja(verbose=args.verbose)
        self.make('Makefile.sketch')
        self.scan_dependencies()
        self.make('Makefile')
| |
# this module is an OS/2 oriented replacement for the pwd standard
# extension module.
# written by Andrew MacIntyre, April 2001.
# updated July 2003, adding field accessor support
# note that this implementation checks whether ":" or ";" is used as
# the field separator character. Path conversions are applied when
# the database uses ":" as the field separator character.
"""Replacement for pwd standard extension module, intended for use on
OS/2 and similar systems which don't normally have an /etc/passwd file.
The standard Unix password database is an ASCII text file with 7 fields
per record (line), separated by a colon:
- user name (string)
- password (encrypted string, or "*" or "")
- user id (integer)
- group id (integer)
- description (usually user's name)
- home directory (path to user's home directory)
- shell (path to the user's login shell)
(see the section 8.1 of the Python Library Reference)
This implementation differs from the standard Unix implementation by
allowing use of the platform's native path separator character - ';' on OS/2,
DOS and MS-Windows - as the field separator in addition to the Unix
standard ":". Additionally, when ":" is the separator path conversions
are applied to deal with any munging of the drive letter reference.
The module looks for the password database at the following locations
(in order first to last):
- ${ETC_PASSWD} (or %ETC_PASSWD%)
- ${ETC}/passwd (or %ETC%/passwd)
- ${PYTHONHOME}/Etc/passwd (or %PYTHONHOME%/Etc/passwd)
Classes
-------
None
Functions
---------
getpwuid(uid) - return the record for user-id uid as a 7-tuple
getpwnam(name) - return the record for user 'name' as a 7-tuple
getpwall() - return a list of 7-tuples, each tuple being one record
(NOTE: the order is arbitrary)
Attributes
----------
passwd_file - the path of the password database file
"""
import os
# try and find the passwd file
# Candidate locations for the password database, in priority order.
__passwd_path = []
if 'ETC_PASSWD' in os.environ:
    __passwd_path.append(os.environ['ETC_PASSWD'])
if 'ETC' in os.environ:
    __passwd_path.append('%s/passwd' % os.environ['ETC'])
if 'PYTHONHOME' in os.environ:
    __passwd_path.append('%s/Etc/passwd' % os.environ['PYTHONHOME'])

# passwd_file ends up as the first candidate that can actually be opened,
# or None when no database is available.
passwd_file = None
for __i in __passwd_path:
    try:
        __f = open(__i, 'r')
        __f.close()
        passwd_file = __i
        break
    # Only swallow I/O failures (missing/unreadable file); a bare except
    # here would also hide KeyboardInterrupt and SystemExit.
    except (OSError, IOError):
        pass
# path conversion handlers
def __nullpathconv(path):
    """Pass-through field conversion: only normalise the alternate path
    separator (when the platform defines one) to os.sep."""
    if os.altsep:
        return path.replace(os.altsep, os.sep)
    # os.altsep is None on platforms without an alternate separator;
    # str.replace(None, ...) would raise TypeError, so return unchanged.
    return path
def __unixpathconv(path):
    """Convert a ":"-separated database path field to a native path.

    Two known drive letter variations are handled: "$x..." and "x;...",
    both rewritten to the conventional "x:..." form.  Fields too short to
    carry a drive reference are passed through unchanged (the original
    raised IndexError on empty or one-character fields).
    """
    if len(path) > 1 and path[0] == '$':
        conv = path[1] + ':' + path[2:]
    elif len(path) > 1 and path[1] == ';':
        conv = path[0] + ':' + path[2:]
    else:
        conv = path
    if os.altsep:
        # normalise the alternate separator when the platform defines one
        conv = conv.replace(os.altsep, os.sep)
    return conv
# decide what field separator we can try to use - Unix standard, with
# the platform's path separator as an option. No special field conversion
# handler is required when using the platform's path separator as field
# separator, but are required for the home directory and shell fields when
# using the standard Unix (":") field separator.
# Map of recognised field separators to their path-conversion handlers.
__field_sep = {':': __unixpathconv}
# Register the platform's path separator too, unless it is already the
# Unix standard ":" (no conversion needed for native separators).
if os.pathsep and os.pathsep != ':':
    __field_sep[os.pathsep] = __nullpathconv
# helper routine to identify which separator character is in use
def __get_field_sep(record):
    """Return the separator character used by `record`.

    A valid 7-field record contains exactly 6 separator characters; raise
    KeyError when no known separator satisfies that.
    """
    for candidate in __field_sep.keys():
        # there should be 6 delimiter characters (for 7 fields)
        if record.count(candidate) == 6:
            return candidate
    raise KeyError('>> passwd database fields not delimited <<')
# class to match the new record field name accessors.
# the resulting object is intended to behave like a read-only tuple,
# with each member also accessible by a field name.
class Passwd:
    """Read-only record mimicking the standard pwd struct.

    Behaves like a 7-tuple while also exposing the conventional pw_*
    field accessors; attribute assignment is rejected to keep records
    immutable.
    """

    def __init__(self, name, passwd, uid, gid, gecos, dir, shell):
        values = (name, passwd, uid, gid, gecos, dir, shell)
        labels = ('pw_name', 'pw_passwd', 'pw_uid', 'pw_gid',
                  'pw_gecos', 'pw_dir', 'pw_shell')
        # Assign through __dict__ directly: __setattr__ below raises on
        # purpose, so normal attribute assignment is unavailable.
        for label, value in zip(labels, values):
            self.__dict__[label] = value
        self.__dict__['_record'] = values

    def __len__(self):
        return len(self._record)

    def __getitem__(self, key):
        return self._record[key]

    def __setattr__(self, name, value):
        raise AttributeError('attribute read-only: %s' % name)

    def __repr__(self):
        return str(self._record)

    def __cmp__(self, other):
        # Compare by the string rendering of the record, as the original did.
        rendered = str(self._record)
        if rendered == other:
            return 0
        return -1 if rendered < other else 1
# read the whole file, parsing each entry into tuple form
# with dictionaries to speed recall by UID or passwd name
def __read_passwd_file():
    """Parse the whole password database file.

    Returns a pair of dictionaries (uidx, namx) mapping user id and user
    name respectively to Passwd records.  Raises KeyError when no database
    file was found or when it yields no valid records.
    """
    if passwd_file:
        passwd = open(passwd_file, 'r')
    else:
        raise KeyError('>> no password database <<')
    uidx = {}
    namx = {}
    sep = None
    while True:
        entry = passwd.readline().strip()
        if len(entry) > 6:
            # A plausible record: 7 fields require at least 6 separators.
            if sep is None:
                # Detect the separator from the first plausible record and
                # reuse it for the remainder of the file.
                sep = __get_field_sep(entry)
            fields = entry.split(sep)
            for i in (2, 3):
                # uid and gid fields are numeric
                fields[i] = int(fields[i])
            for i in (5, 6):
                # home directory and shell may need path conversion
                fields[i] = __field_sep[sep](fields[i])
            record = Passwd(*fields)
            # first record wins when a uid or name appears more than once
            if fields[2] not in uidx:
                uidx[fields[2]] = record
            if fields[0] not in namx:
                namx[fields[0]] = record
        elif len(entry) > 0:
            pass # skip empty or malformed records
        else:
            # readline() returned an empty string: end of file
            break
    passwd.close()
    if len(uidx) == 0:
        raise KeyError
    return (uidx, namx)
# return the passwd database entry by UID
def getpwuid(uid):
    """Return the password database record for numeric user id `uid`."""
    by_uid, _by_name = __read_passwd_file()
    return by_uid[uid]
# return the passwd database entry by passwd name
def getpwnam(name):
    """Return the password database record for user name `name`."""
    _by_uid, by_name = __read_passwd_file()
    return by_name[name]
# return all the passwd database entries
def getpwall():
    """Return all password database records (in arbitrary order)."""
    _by_uid, by_name = __read_passwd_file()
    return by_name.values()
# test harness
if __name__ == '__main__':
    # Exercise the parser against whatever database file is found.
    getpwall()
| |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for the command-line scripts in the top-level I{bin/} directory.
Tests for actual functionality belong elsewhere, written in a way that doesn't
involve launching child processes.
"""
from os import devnull, getcwd, chdir
from sys import executable
from subprocess import PIPE, Popen
from twisted.trial.unittest import SkipTest, TestCase
from twisted.python.modules import getModule
from twisted.python.filepath import FilePath
from twisted.python.test.test_shellcomp import ZshScriptTestMixin
def outputFromPythonScript(script, *args):
    """
    Synchronously run a Python script, with the same Python interpreter that
    ran the process calling this function, using L{Popen}, using the given
    command-line arguments, with standard input and standard error both
    redirected to L{os.devnull}, and return its output as a string.

    @param script: The path to the script.
    @type script: L{FilePath}

    @param args: The command-line arguments to follow the script in its
        invocation (the desired C{sys.argv[1:]}).
    @type args: L{tuple} of L{str}

    @return: the output passed to the process's C{stdout}, without any
        messages from C{stderr}.
    @rtype: L{bytes}
    """
    # open() instead of the Python 2-only file() builtin; the with-statement
    # guarantees the null handles are closed even if Popen raises.
    with open(devnull, "rb") as nullInput, open(devnull, "wb") as nullError:
        process = Popen([executable, script.path] + list(args),
                        stdout=PIPE, stderr=nullError, stdin=nullInput)
        stdout = process.stdout.read()
        process.stdout.close()
        # Reap the child so it does not linger as a zombie.
        process.wait()
    return stdout
class ScriptTestsMixin:
    """
    Mixin for L{TestCase} subclasses which defines a helper function for
    testing a Twisted-using script.
    """
    bin = getModule("twisted").pathEntry.filePath.child("bin")

    def scriptTest(self, name):
        """
        Verify that the given script runs and reports the version of Twisted
        currently under test.

        This only works when running tests against a vcs checkout of Twisted,
        since it relies on the scripts being in the place they are kept in
        version control, and exercises their logic for finding the right
        version of Twisted to use in that situation.

        @param name: A path fragment, relative to the I{bin} directory of a
            Twisted source checkout, identifying a script to test.
        @type name: C{str}

        @raise SkipTest: if the script is not where it is expected to be.
        """
        script = self.bin.preauthChild(name)
        if not script.exists():
            raise SkipTest(
                "Script tests do not apply to installed configuration.")

        from twisted.copyright import version
        self.assertIn(
            str(version), outputFromPythonScript(script, '--version'))
class ScriptTests(TestCase, ScriptTestsMixin):
    """
    Tests for the core scripts.
    """
    def test_twistd(self):
        # Smoke test: the twistd script runs and reports the tested version.
        self.scriptTest("twistd")

    def test_twistdPathInsert(self):
        """
        The twistd script adds the current working directory to sys.path so
        that it's able to import modules from it.
        """
        script = self.bin.child("twistd")
        if not script.exists():
            raise SkipTest(
                "Script tests do not apply to installed configuration.")
        cwd = getcwd()
        self.addCleanup(chdir, cwd)
        testDir = FilePath(self.mktemp())
        testDir.makedirs()
        chdir(testDir.path)
        # The .tac file prints sys.path; if twistd inserted the working
        # directory, its repr appears in the child's output.
        testDir.child("bar.tac").setContent(
            "import sys\n"
            "print sys.path\n")
        output = outputFromPythonScript(script, '-ny', 'bar.tac')
        self.assertIn(repr(testDir.path), output)

    def test_manhole(self):
        # Smoke test for the manhole script.
        self.scriptTest("manhole")

    def test_trial(self):
        # Smoke test for the trial test-runner script.
        self.scriptTest("trial")

    def test_trialPathInsert(self):
        """
        The trial script adds the current working directory to sys.path so that
        it's able to import modules from it.
        """
        script = self.bin.child("trial")
        if not script.exists():
            raise SkipTest(
                "Script tests do not apply to installed configuration.")
        cwd = getcwd()
        self.addCleanup(chdir, cwd)
        testDir = FilePath(self.mktemp())
        testDir.makedirs()
        chdir(testDir.path)
        # An (empty) importable module in the new working directory; trial
        # must be able to find it by bare name.
        testDir.child("foo.py").setContent("")
        output = outputFromPythonScript(script, 'foo')
        self.assertIn("PASSED", output)

    def test_pyhtmlizer(self):
        # Smoke test for the pyhtmlizer source-highlighting script.
        self.scriptTest("pyhtmlizer")

    def test_tap2rpm(self):
        # Smoke test for the tap2rpm packaging script.
        self.scriptTest("tap2rpm")

    def test_tap2deb(self):
        # Smoke test for the tap2deb packaging script.
        self.scriptTest("tap2deb")

    def test_tapconvert(self):
        # Smoke test for the tapconvert script.
        self.scriptTest("tapconvert")

    def test_deprecatedTkunzip(self):
        """
        The entire L{twisted.scripts.tkunzip} module, part of the old Windows
        installer tool chain, is deprecated.
        """
        # Importing the module is what triggers the deprecation warning.
        from twisted.scripts import tkunzip
        warnings = self.flushWarnings(
            offendingFunctions=[self.test_deprecatedTkunzip])
        self.assertEqual(DeprecationWarning, warnings[0]['category'])
        self.assertEqual(
            "twisted.scripts.tkunzip was deprecated in Twisted 11.1.0: "
            "Seek unzipping software outside of Twisted.",
            warnings[0]['message'])
        self.assertEqual(1, len(warnings))

    def test_deprecatedTapconvert(self):
        """
        The entire L{twisted.scripts.tapconvert} module is deprecated.
        """
        # Importing the module is what triggers the deprecation warning.
        from twisted.scripts import tapconvert
        warnings = self.flushWarnings(
            offendingFunctions=[self.test_deprecatedTapconvert])
        self.assertEqual(DeprecationWarning, warnings[0]['category'])
        self.assertEqual(
            "twisted.scripts.tapconvert was deprecated in Twisted 12.1.0: "
            "tapconvert has been deprecated.",
            warnings[0]['message'])
        self.assertEqual(1, len(warnings))
class ZshIntegrationTestCase(TestCase, ZshScriptTestMixin):
    """
    Test that zsh completion functions are generated without error
    """
    # (script name, fully-qualified Options class) pairs; consumed by
    # ZshScriptTestMixin, which generates a completion function for each.
    generateFor = [('twistd', 'twisted.scripts.twistd.ServerOptions'),
                   ('trial', 'twisted.scripts.trial.Options'),
                   ('pyhtmlizer', 'twisted.scripts.htmlizer.Options'),
                   ('tap2rpm', 'twisted.scripts.tap2rpm.MyOptions'),
                   ('tap2deb', 'twisted.scripts.tap2deb.MyOptions'),
                   ('tapconvert', 'twisted.scripts.tapconvert.ConvertOptions'),
                   ('manhole', 'twisted.scripts.manhole.MyOptions')
                   ]
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for SpacetoDepth op."""
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class SpaceToDepthTest(test.TestCase):
  """Functional tests for array_ops.space_to_depth (NHWC, NCHW, NCHW_VECT_C)."""

  def _testOne(self, inputs, block_size, outputs, dtype=dtypes.float32):
    """Asserts space_to_depth(inputs, block_size) equals outputs.

    Runs the op in NHWC on the default device and, when a GPU is available,
    also in NCHW on the GPU (converting layouts around the op).
    """
    input_nhwc = math_ops.cast(inputs, dtype)
    # test NHWC (default)
    x_tf = array_ops.space_to_depth(input_nhwc, block_size)
    self.assertAllEqual(self.evaluate(x_tf), outputs)

    if test_util.is_gpu_available():
      with test_util.force_gpu():
        # test NCHW on GPU
        input_nchw = test_util.NHWCToNCHW(input_nhwc)
        output_nchw = array_ops.space_to_depth(
            input_nchw, block_size, data_format="NCHW")
        output_nhwc = test_util.NCHWToNHWC(output_nchw)
        self.assertAllEqual(self.evaluate(output_nhwc), outputs)

  def testBasic(self):
    # Smallest meaningful case: 2x2 spatial block folded into depth 4.
    x_np = [[[[1], [2]], [[3], [4]]]]
    block_size = 2
    x_out = [[[[1, 2, 3, 4]]]]
    for dtype in [dtypes.float32, dtypes.float16, dtypes.uint8]:
      self._testOne(x_np, block_size, x_out, dtype=dtype)

  # Tests for larger input dimensions. To make sure elements are
  # correctly ordered spatially.
  def testLargerInput2x2(self):
    x_np = [[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
             [[9], [10], [13], [14]], [[11], [12], [15], [16]]]]
    block_size = 2
    x_out = [[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12],
                                             [13, 14, 15, 16]]]]
    self._testOne(x_np, block_size, x_out)

  # Tests for larger input dimensions. To make sure elements are
  # correctly ordered in depth. Here, larger block size.
  def testLargerInput4x4(self):
    x_np = [[[[1], [2], [5], [6]], [[3], [4], [7], [8]],
             [[9], [10], [13], [14]], [[11], [12], [15], [16]]]]
    block_size = 4
    x_out = [[[[1, 2, 5, 6, 3, 4, 7, 8, 9, 10, 13, 14, 11, 12, 15, 16]]]]
    self._testOne(x_np, block_size, x_out)

  # Tests for larger input depths.
  # To make sure elements are properly interleaved in depth.
  def testDepthInterleaved(self):
    x_np = [[[[1, 10], [2, 20]], [[3, 30], [4, 40]]]]
    block_size = 2
    x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40]]]]
    self._testOne(x_np, block_size, x_out)

  # Tests for larger input depths. Here an odd depth.
  # To make sure elements are properly interleaved in depth.
  def testDepthInterleavedDepth3(self):
    x_np = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]]
    block_size = 2
    x_out = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
    self._testOne(x_np, block_size, x_out)

  # Tests for larger input dimensions AND for larger input depths.
  # To make sure elements are properly interleaved in depth and ordered
  # spatially.
  def testDepthInterleavedLarge(self):
    x_np = [[[[1, 10], [2, 20], [5, 50], [6, 60]],
             [[3, 30], [4, 40], [7, 70], [8, 80]],
             [[9, 90], [10, 100], [13, 130], [14, 140]],
             [[11, 110], [12, 120], [15, 150], [16, 160]]]]
    block_size = 2
    x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40], [5, 50, 6, 60, 7, 70, 8, 80]],
              [[9, 90, 10, 100, 11, 110, 12, 120],
               [13, 130, 14, 140, 15, 150, 16, 160]]]]
    self._testOne(x_np, block_size, x_out)

  def testBlockSize2Batch10(self):
    # Verifies batching: each batch element is transformed independently.
    block_size = 2

    def batch_input_elt(i):
      return [[[1 * i], [2 * i], [5 * i], [6 * i]],
              [[3 * i], [4 * i], [7 * i], [8 * i]],
              [[9 * i], [10 * i], [13 * i], [14 * i]],
              [[11 * i], [12 * i], [15 * i], [16 * i]]]

    def batch_output_elt(i):
      return [[[1 * i, 2 * i, 3 * i, 4 * i], [5 * i, 6 * i, 7 * i, 8 * i]],
              [[9 * i, 10 * i, 11 * i, 12 * i],
               [13 * i, 14 * i, 15 * i, 16 * i]]]

    batch_size = 10
    x_np = [batch_input_elt(i) for i in range(batch_size)]
    x_out = [batch_output_elt(i) for i in range(batch_size)]
    self._testOne(x_np, block_size, x_out)

  def testBatchSize0(self):
    # Degenerate case: an empty batch must still produce the reshaped shape.
    block_size = 2
    batch_size = 0
    x_np = array_ops.ones([batch_size, 4, 6, 3])
    x_out = array_ops.ones([batch_size, 2, 3, 12])
    self._testOne(x_np, block_size, x_out)

  # Tests for different width and height.
  def testNonSquare(self):
    x_np = [[[[1, 10], [2, 20]], [[3, 30], [4, 40]], [[5, 50], [6, 60]],
             [[7, 70], [8, 80]], [[9, 90], [10, 100]], [[11, 110], [12, 120]]]]
    block_size = 2
    x_out = [[[[1, 10, 2, 20, 3, 30, 4, 40]], [[5, 50, 6, 60, 7, 70, 8, 80]],
              [[9, 90, 10, 100, 11, 110, 12, 120]]]]
    self._testOne(x_np, block_size, x_out)

  # Error handling:

  def testInputWrongDimMissingDepth(self):
    # The input is missing the last dimension ("depth")
    x_np = [[[1, 2], [3, 4]]]
    block_size = 2
    with self.assertRaises((ValueError, errors.InvalidArgumentError)):
      out_tf = array_ops.space_to_depth(x_np, block_size)
      self.evaluate(out_tf)

  def testInputWrongDimMissingBatch(self):
    # The input is missing the first dimension ("batch")
    x_np = [[[1], [2]], [[3], [4]]]
    block_size = 2
    with self.assertRaises((ValueError, errors.InvalidArgumentError)):
      _ = array_ops.space_to_depth(x_np, block_size)

  def testBlockSize0(self):
    # The block size is 0.
    x_np = [[[[1], [2]], [[3], [4]]]]
    block_size = 0
    with self.assertRaises((ValueError, errors.InvalidArgumentError)):
      out_tf = array_ops.space_to_depth(x_np, block_size)
      self.evaluate(out_tf)

  def testBlockSizeOne(self):
    # The block size is 1. The block size needs to be > 1.
    x_np = [[[[1], [2]], [[3], [4]]]]
    block_size = 1
    with self.assertRaises((ValueError, errors.InvalidArgumentError)):
      out_tf = array_ops.space_to_depth(x_np, block_size)
      self.evaluate(out_tf)

  def testBlockSizeLarger(self):
    # The block size is too large for this input.
    x_np = [[[[1], [2]], [[3], [4]]]]
    block_size = 10
    with self.assertRaises((ValueError, errors.InvalidArgumentError)):
      out_tf = array_ops.space_to_depth(x_np, block_size)
      self.evaluate(out_tf)

  def testBlockSizeNotDivisibleWidth(self):
    # The block size divides width but not height.
    x_np = [[[[1], [2], [3]], [[3], [4], [7]]]]
    block_size = 3
    with self.assertRaises((ValueError, errors.InvalidArgumentError)):
      _ = array_ops.space_to_depth(x_np, block_size)

  def testBlockSizeNotDivisibleHeight(self):
    # The block size divides height but not width.
    x_np = [[[[1], [2]], [[3], [4]], [[5], [6]]]]
    block_size = 3
    with self.assertRaises((ValueError, errors.InvalidArgumentError)):
      _ = array_ops.space_to_depth(x_np, block_size)

  def testBlockSizeNotDivisibleBoth(self):
    # The block size divides neither width nor height.
    x_np = [[[[1], [2]], [[3], [4]]]]
    block_size = 3
    with self.assertRaises((ValueError, errors.InvalidArgumentError)):
      _ = array_ops.space_to_depth(x_np, block_size)

  def testUnknownShape(self):
    # Testing an unknown shape in graph.
    with ops.Graph().as_default():
      t = array_ops.space_to_depth(
          array_ops.placeholder(dtypes.float32), block_size=4)
      self.assertEqual(4, t.get_shape().ndims)

  def spaceToDepthUsingTranspose(self, tensor, block_size, data_format):
    """Reference implementation of space_to_depth via reshape + transpose."""
    block_size_sq = block_size * block_size
    dtype = tensor.dtype
    if dtype == dtypes.qint8:
      # reshape/transpose do not support qint8; work on the int8 bit pattern
      tensor = array_ops.bitcast(tensor, dtypes.int8)

    if data_format == "NHWC":
      b, ih, iw, ic = tensor.shape.as_list()
      assert ih % block_size == 0, (ih, block_size)
      assert iw % block_size == 0, (iw, block_size)
      ow, oh, oc = iw // block_size, ih // block_size, ic * block_size_sq
      tensor = array_ops.reshape(tensor,
                                 [b, oh, block_size, ow, block_size, ic])
      tensor = array_ops.transpose(tensor, [0, 1, 3, 2, 4, 5])
      tensor = array_ops.reshape(tensor, [b, oh, ow, oc])
    elif data_format == "NCHW":
      b, ic, ih, iw = tensor.shape.as_list()
      assert ih % block_size == 0, (ih, block_size)
      assert iw % block_size == 0, (iw, block_size)
      ow, oh, oc = iw // block_size, ih // block_size, ic * block_size_sq
      tensor = array_ops.reshape(tensor,
                                 [b, ic, oh, block_size, ow, block_size])
      tensor = array_ops.transpose(tensor, [0, 3, 5, 1, 2, 4])
      tensor = array_ops.reshape(tensor, [b, oc, oh, ow])

    if dtype == dtypes.qint8:
      tensor = array_ops.bitcast(tensor, dtype)
    return tensor

  def compareToTranspose(self, batch_size, out_height, out_width, in_channels,
                         block_size, data_format, data_type, use_gpu):
    """Compares the op's output against the transpose-based reference."""
    in_height = out_height * block_size
    in_width = out_width * block_size
    nhwc_input_shape = [batch_size, in_height, in_width, in_channels]
    nchw_input_shape = [batch_size, in_channels, in_height, in_width]
    total_size = np.prod(nhwc_input_shape)

    # Construct the input tensor in data_type and NHWC.
    # force_cpu is needed because quantize_v2 runs on only CPU.
    with test_util.force_cpu():
      if data_type == dtypes.qint8:
        # Initialize the input tensor with qint8 values that circle -127..127.
        x = [((f + 128) % 255) - 127 for f in range(total_size)]
        t = constant_op.constant(
            x, shape=nhwc_input_shape, dtype=dtypes.float32)
        t, _, _ = gen_array_ops.quantize_v2(t, -128.0, 127.0, dtypes.qint8)
      else:
        assert data_type == dtypes.float32
        # Initialize the input tensor with ascending whole numbers as floats.
        x = [f * 1.0 for f in range(total_size)]
        shape = nchw_input_shape if data_format == "NCHW" else nhwc_input_shape
        t = constant_op.constant(x, shape=shape, dtype=dtypes.float32)

    with test_util.device(use_gpu):
      if data_format == "NCHW_VECT_C":
        assert data_type == dtypes.qint8

        # Convert to int8, then NHWCToNCHW_VECT_C, and then back to qint8.
        actual = array_ops.bitcast(t, dtypes.int8)
        actual = test_util.NHWCToNCHW_VECT_C(actual)
        actual = array_ops.bitcast(actual, dtypes.qint8)
        actual = array_ops.space_to_depth(
            actual, block_size, data_format=data_format)
        actual = array_ops.bitcast(actual, dtypes.int8)
        actual = test_util.NCHW_VECT_CToNHWC(actual)
        actual = array_ops.bitcast(actual, dtypes.qint8)

        expected = array_ops.bitcast(t, dtypes.int8)
        expected = math_ops.cast(expected, dtypes.float32)
        expected = self.spaceToDepthUsingTranspose(expected, block_size, "NHWC")
        expected = math_ops.cast(expected, dtypes.int8)
        expected = array_ops.bitcast(expected, dtypes.qint8)
      else:
        # Initialize the input tensor with ascending whole numbers as floats.
        actual = array_ops.space_to_depth(
            t, block_size, data_format=data_format)
        expected = self.spaceToDepthUsingTranspose(t, block_size, data_format)

      actual_vals, expected_vals = self.evaluate([actual, expected])
      self.assertTrue(np.array_equal(actual_vals, expected_vals))

  @test_util.disable_tfrt("b/169901260")
  def testAgainstTranspose(self):
    self.compareToTranspose(3, 2, 3, 1, 2, "NHWC", dtypes.float32, False)
    self.compareToTranspose(1, 2, 3, 2, 2, "NHWC", dtypes.float32, False)
    self.compareToTranspose(1, 2, 3, 2, 3, "NHWC", dtypes.float32, False)
    self.compareToTranspose(3, 2, 3, 1, 2, "NHWC", dtypes.qint8, False)
    self.compareToTranspose(1, 2, 3, 2, 2, "NHWC", dtypes.qint8, False)
    self.compareToTranspose(1, 2, 3, 2, 3, "NHWC", dtypes.qint8, False)

    if not test.is_gpu_available():
      tf_logging.info("skipping gpu tests since gpu not available")
      return

    self.compareToTranspose(3, 2, 3, 1, 2, "NHWC", dtypes.float32, True)
    self.compareToTranspose(3, 2, 3, 2, 2, "NHWC", dtypes.float32, True)
    self.compareToTranspose(3, 2, 3, 1, 2, "NCHW", dtypes.float32, True)
    self.compareToTranspose(3, 2, 3, 2, 3, "NCHW", dtypes.float32, True)
    self.compareToTranspose(5, 7, 11, 3, 2, "NCHW", dtypes.float32, True)
    self.compareToTranspose(3, 2, 3, 4, 2, "NCHW_VECT_C", dtypes.qint8, True)
    self.compareToTranspose(3, 2, 3, 8, 3, "NCHW_VECT_C", dtypes.qint8, True)
    self.compareToTranspose(5, 7, 11, 12, 2, "NCHW_VECT_C", dtypes.qint8, True)
class SpaceToDepthGradientTest(test.TestCase):
  """Checks analytic vs. numeric gradients of space_to_depth."""

  def _checkGrad(self, x, block_size, data_format):
    """Compares theoretical and numerical gradients for one input array."""
    # The NCHW kernel is implemented only for GPU; skip silently otherwise.
    if data_format == "NCHW" and not test.is_gpu_available():
      return
    assert x.ndim == 4

    def transform(inp):
      return array_ops.space_to_depth(inp, block_size, data_format=data_format)

    with test_util.use_gpu():
      with self.cached_session():
        theoretical, numerical = gradient_checker_v2.compute_gradient(
            transform, [ops.convert_to_tensor(x)])
        self.assertAllClose(theoretical, numerical, rtol=1e-2, atol=1e-2)

  def _compare(self, b, h, w, d, block_size, data_format):
    """Builds a random input of shape [b, h*bs, w*bs, d] (NHWC) or
    [b, d, h*bs, w*bs] (NCHW) and checks its gradients."""
    elems = b * h * w * d * block_size * block_size
    data = np.random.normal(0, 1, elems).astype(np.float32)
    if data_format == "NHWC":
      shape = [b, h * block_size, w * block_size, d]
    else:
      shape = [b, d, h * block_size, w * block_size]
    self._checkGrad(data.reshape(shape), block_size, data_format)

  # Keep the dimensions small here: the checked Jacobian grows with the
  # cartesian product of the dimensions.
  def testSmall(self):
    for fmt in ("NHWC", "NCHW"):
      self._compare(1, 2, 3, 5, 2, fmt)

  @test_util.run_deprecated_v1
  def testSmall2(self):
    for fmt in ("NHWC", "NCHW"):
      self._compare(2, 4, 3, 2, 2, fmt)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| |
# -*- coding: utf-8 -*-
""" Sahana Eden Scenario Model
@copyright: 2009-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
# Public API of this module: the scenario model classes defined below.
__all__ = ["S3ScenarioModel",
           "S3ScenarioAssetModel",
           "S3ScenarioHRModel",
           "S3ScenarioMapModel",
           "S3ScenarioSiteModel",
           "S3ScenarioTaskModel",
           ]
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class S3ScenarioModel(S3Model):
    """
        Scenario Model
        http://eden.sahanafoundation.org/wiki/BluePrintScenario
        Link tables are in separate classes to increase performance & allow
        the system to be more modular
    """
    # Names registered with s3db: the table plus the reusable foreign-key field
    names = ["scenario_scenario",
             "scenario_scenario_id",
             ]
    def model(self):
        """ Define the scenario table, its components and the reusable FK """
        T = current.T
        db = current.db
        add_component = self.add_component
        # ---------------------------------------------------------------------
        # Scenarios
        #
        # Scenarios are Templates for Events to plan what resources are required
        #
        tablename = "scenario_scenario"
        table = self.define_table(tablename,
                                  Field("name", notnull=True,
                                        length=64, # Mayon compatiblity
                                        label=T("Name")),
                                  s3_comments(),
                                  *s3_meta_fields())
        self.configure(tablename,
                       # Open Map Config to set the default Location
                       create_next=URL(args=["[id]", "config"]),
                       deduplicate=self.scenario_scenario_duplicate,
                       )
        # CRUD strings
        ADD_SCENARIO = T("New Scenario")
        current.response.s3.crud_strings[tablename] = Storage(
            title_create = ADD_SCENARIO,
            title_display = T("Scenario Details"),
            title_list = T("Scenarios"),
            title_update = T("Edit Scenario"),
            title_search = T("Search Scenarios"),
            subtitle_create = T("Add New Scenario"),
            label_list_button = T("List Scenarios"),
            label_create_button = ADD_SCENARIO,
            label_delete_button = T("Delete Scenario"),
            msg_record_created = T("Scenario added"),
            msg_record_modified = T("Scenario updated"),
            msg_record_deleted = T("Scenario deleted"),
            msg_list_empty = T("No Scenarios currently registered"))
        # Components
        # Tasks (many-to-many through the scenario_task link table)
        add_component("project_task",
                      scenario_scenario=Storage(
                                link="scenario_task",
                                joinby="scenario_id",
                                key="task_id",
                                # @ToDo: Widget to handle embedded LocationSelector
                                #actuate="embed",
                                actuate="link",
                                autocomplete="name",
                                autodelete=False))
        # Human Resources (many-to-many through scenario_human_resource)
        add_component("hrm_human_resource",
                      scenario_scenario=Storage(
                                link="scenario_human_resource",
                                joinby="scenario_id",
                                key="human_resource_id",
                                # @ToDo: Widget to handle embedded AddPersonWidget
                                #actuate="embed",
                                actuate="link",
                                autocomplete="name",
                                autodelete=False))
        # Assets (many-to-many through scenario_asset)
        add_component("asset_asset",
                      scenario_scenario=Storage(
                                link="scenario_asset",
                                joinby="scenario_id",
                                key="asset_id",
                                actuate="embed",
                                autocomplete="name",
                                autodelete=False))
        # Facilities
        add_component("scenario_site",
                      scenario_scenario="scenario_id")
        # Map Config as a component of Scenarios (1:1, replaced on update,
        # deleted together with the scenario)
        add_component("gis_config",
                      scenario_scenario=Storage(
                                link="scenario_config",
                                joinby="scenario_id",
                                multiple=False,
                                key="config_id",
                                actuate="replace",
                                autocomplete="name",
                                autodelete=True))
        # Reusable FK used by the link-table models below to reference a scenario
        # NOTE(review): represent assumes the referenced record still exists —
        # .first().name would raise if it were gone; verify against callers.
        scenario_id = S3ReusableField("scenario_id", table,
                                      sortby="name",
                                      requires = IS_NULL_OR(
                                                    IS_ONE_OF(db, "scenario_scenario.id",
                                                              "%(name)s",
                                                              orderby="scenario_scenario.name",
                                                              sort=True)),
                                      represent = lambda id: \
                                        (id and [db(db.scenario_scenario.id == id).select(db.scenario_scenario.name,
                                                                                          limitby=(0, 1)).first().name] or [current.messages.NONE])[0],
                                      label = T("Scenario"),
                                      ondelete = "SET NULL",
                                      # Comment these to use a Dropdown & not an Autocomplete
                                      #widget = S3AutocompleteWidget()
                                      #comment = DIV(_class="tooltip",
                                      #              _title="%s|%s" % (T("Scenario"),
                                      #                                T("Enter some characters to bring up a list of possible matches")))
                                      )
        # ---------------------------------------------------------------------
        # Pass variables back to global scope (s3db.*)
        #
        return Storage(
                scenario_scenario_id = scenario_id,
            )
    # -------------------------------------------------------------------------
    @staticmethod
    def defaults():
        """
            Return safe defaults in case the model has been deactivated.
        """
        # A no-op field so dependent models still define their tables
        return Storage(
            scenario_scenario_id = S3ReusableField("scenario_id",
                                                   "integer",
                                                   readable=False,
                                                   writable=False),
        )
    # -------------------------------------------------------------------------
    @staticmethod
    def scenario_scenario_duplicate(job):
        """
            This callback will be called when importing records
            it will look to see if the record being imported is a duplicate.
            @param job: An S3ImportJob object which includes all the details
                        of the record being imported
            If the record is a duplicate then it will set the job method to update
            Rules for finding a duplicate:
                - If the name exists then it's a duplicate
        """
        if job.tablename == "scenario_scenario":
            table = job.table
            if "name" in job.data:
                name = job.data.name
            else:
                # No name provided: nothing to match on
                return
            query = (table.name == name)
            _duplicate = current.db(query).select(table.id,
                                                  limitby=(0, 1)).first()
            if _duplicate:
                # Redirect the import to update the existing record
                job.id = _duplicate.id
                job.data.id = _duplicate.id
                job.method = job.METHOD.UPDATE
# =============================================================================
class S3ScenarioAssetModel(S3Model):
    """
        Scenario Asset Model: links Assets to Scenarios
    """

    names = ["scenario_asset"]

    def model(self):

        T = current.T

        # ---------------------------------------------------------------------
        # Assets
        # @ToDo: Use generic Supply Items not Asset instances? (Typed resources)
        #        Depends on the scale of the scenario! So support both...
        # @ToDo: Search Widget
        tablename = "scenario_asset"
        self.define_table(tablename,
                          self.scenario_scenario_id(),
                          self.asset_asset_id(),
                          *s3_meta_fields())

        ADD_ASSET = T("Add Asset")
        current.response.s3.crud_strings[tablename] = Storage(
            title_create = ADD_ASSET,
            title_display = T("Asset Details"),
            title_list = T("Assets"),
            title_update = T("Edit Asset"),
            title_search = T("Search Assets"),
            subtitle_create = T("Add New Asset"),
            label_list_button = T("List Assets"),
            label_create_button = ADD_ASSET,
            label_delete_button = T("Remove Asset from this scenario"),
            msg_record_created = T("Asset added"),
            msg_record_modified = T("Asset updated"),
            msg_record_deleted = T("Asset removed"),
            msg_list_empty = T("No Assets currently registered in this scenario"))

        # ---------------------------------------------------------------------
        # This model exports nothing to the global scope (s3db.*)
        return Storage()
# =============================================================================
class S3ScenarioHRModel(S3Model):
    """
        Scenario Human Resources Model: links Staff/Volunteers to Scenarios
    """

    names = ["scenario_human_resource"]

    def model(self):

        T = current.T

        # ---------------------------------------------------------------------
        # Staff/Volunteers
        # @ToDo: Use Positions, not individual HRs (Typed resources?)
        # @ToDo: Search Widget
        tablename = "scenario_human_resource"
        self.define_table(tablename,
                          self.scenario_scenario_id(),
                          self.hrm_human_resource_id(),
                          *s3_meta_fields())

        ADD_HUMAN_RESOURCE = T("Add Human Resource")
        current.response.s3.crud_strings[tablename] = Storage(
            title_create = ADD_HUMAN_RESOURCE,
            title_display = T("Human Resource Details"),
            title_list = T("Human Resources"),
            title_update = T("Edit Human Resource"),
            title_search = T("Search Human Resources"),
            subtitle_create = T("Add New Human Resource"),
            label_list_button = T("List Human Resources"),
            label_create_button = ADD_HUMAN_RESOURCE,
            label_delete_button = T("Remove Human Resource from this scenario"),
            msg_record_created = T("Human Resource added"),
            msg_record_modified = T("Human Resource updated"),
            msg_record_deleted = T("Human Resource removed"),
            msg_list_empty = T("No Human Resources currently registered in this scenario"))

        # ---------------------------------------------------------------------
        # This model exports nothing to the global scope (s3db.*)
        return Storage()
# =============================================================================
class S3ScenarioMapModel(S3Model):
    """
        Scenario Map Model: links a Map Configuration to a Scenario
    """

    names = ["scenario_config"]

    def model(self):

        T = current.T

        # ---------------------------------------------------------------------
        # Link Table for the Map Config used in this Scenario
        # @ToDo: Widget suitable for a 1-1 relationship where we can assume
        #        that the Config is pre-created
        tablename = "scenario_config"
        self.define_table(tablename,
                          self.scenario_scenario_id(),
                          self.gis_config_id(),
                          *s3_meta_fields())

        ADD_MAP_CONFIG = T("Add Map Configuration")
        current.response.s3.crud_strings[tablename] = Storage(
            title_create = ADD_MAP_CONFIG,
            title_display = T("Map Configuration Details"),
            title_list = T("Map Configurations"),
            title_update = T("Edit Map Configuration"),
            title_search = T("Search Map Configurations"),
            subtitle_create = T("Add New Map Configuration"),
            label_list_button = T("List Map Configurations"),
            label_create_button = ADD_MAP_CONFIG,
            label_delete_button = T("Remove Map Configuration from this scenario"),
            msg_record_created = T("Map Configuration added"),
            msg_record_modified = T("Map Configuration updated"),
            msg_record_deleted = T("Map Configuration removed"),
            msg_list_empty = T("No Map Configurations currently registered in this scenario"))

        # ---------------------------------------------------------------------
        # This model exports nothing to the global scope (s3db.*)
        return Storage()
# =============================================================================
class S3ScenarioSiteModel(S3Model):
    """
        Scenario Facility Model: links Facilities (Sites) to Scenarios
    """

    names = ["scenario_site"]

    def model(self):

        T = current.T

        # ---------------------------------------------------------------------
        # Facilities
        # @ToDo: Search Widget
        tablename = "scenario_site"
        table = self.define_table(tablename,
                                  self.scenario_scenario_id(),
                                  self.org_site_id,
                                  *s3_meta_fields())
        # The site superlink is hidden by default: expose it for this table
        table.site_id.readable = table.site_id.writable = True

        ADD_FACILITY = T("Add Facility")
        current.response.s3.crud_strings[tablename] = Storage(
            title_create = ADD_FACILITY,
            title_display = T("Facility Details"),
            title_list = T("Facilities"),
            title_update = T("Edit Facility"),
            title_search = T("Search Facilities"),
            subtitle_create = T("Add New Facility"),
            label_list_button = T("List Facilities"),
            label_create_button = ADD_FACILITY,
            label_delete_button = T("Remove Facility from this scenario"),
            msg_record_created = T("Facility added"),
            msg_record_modified = T("Facility updated"),
            msg_record_deleted = T("Facility removed"),
            msg_list_empty = T("No Facilities currently registered in this scenario"))

        # ---------------------------------------------------------------------
        # This model exports nothing to the global scope (s3db.*)
        return Storage()
# =============================================================================
class S3ScenarioTaskModel(S3Model):
    """
        Scenario Tasks Model: standing Tasks attached to a Scenario
    """

    names = ["scenario_task"]

    def model(self):

        T = current.T

        # ---------------------------------------------------------------------
        # Tasks
        # Standing Tasks required for this Scenario
        # @ToDo: Search Widget
        tablename = "scenario_task"
        self.define_table(tablename,
                          self.scenario_scenario_id(),
                          self.project_task_id(),
                          *s3_meta_fields())

        ADD_TASK = T("Add Task")
        current.response.s3.crud_strings[tablename] = Storage(
            title_create = ADD_TASK,
            title_display = T("Task Details"),
            title_list = T("Tasks"),
            title_update = T("Edit Task"),
            title_search = T("Search Tasks"),
            subtitle_create = T("Add New Task"),
            label_list_button = T("List Tasks"),
            label_create_button = ADD_TASK,
            label_delete_button = T("Remove Task from this scenario"),
            msg_record_created = T("Task added"),
            msg_record_modified = T("Task updated"),
            msg_record_deleted = T("Task removed"),
            msg_list_empty = T("No Tasks currently registered in this scenario"))

        # ---------------------------------------------------------------------
        # This model exports nothing to the global scope (s3db.*)
        return Storage()
# END =========================================================================
| |
"""
Functions for creating and restoring url-safe signed JSON objects.
The format used looks like this:
>>> signing.dumps("hello")
'ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk'
There are two components here, separated by a ':'. The first component is a
URLsafe base64 encoded JSON of the object passed to dumps(). The second
component is a base64 encoded hmac/SHA1 hash of "$first_component:$secret"
signing.loads(s) checks the signature and returns the deserialised object.
If the signature fails, a BadSignature exception is raised.
>>> signing.loads("ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk")
u'hello'
>>> signing.loads("ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk-modified")
...
BadSignature: Signature failed: ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk-modified
You can optionally compress the JSON prior to base64 encoding it to save
space, using the compress=True argument. This checks if compression actually
helps and only applies compression if the result is a shorter string:
>>> signing.dumps(range(1, 20), compress=True)
'.eJwFwcERACAIwLCF-rCiILN47r-GyZVJsNgkxaFxoDgxcOHGxMKD_T7vhAml:1QaUaL:BA0thEZrp4FQVXIXuOvYJtLJSrQ'
The fact that the string is compressed is signalled by the prefixed '.' at the
start of the base64 JSON.
There are 65 url-safe characters: the 64 used by url-safe base64 and the ':'.
These functions make use of all of them.
"""
from __future__ import unicode_literals
import base64
import json
import time
import zlib
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils import baseconv
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils.encoding import force_bytes, force_str, force_text
from django.utils.importlib import import_module
class BadSignature(Exception):
    """Raised when the signature on a signed value does not validate."""
    pass
class SignatureExpired(BadSignature):
    """Raised when a valid signature's timestamp is older than max_age."""
    pass
def b64_encode(s):
    """URL-safe base64 of *s* with the trailing '=' padding removed."""
    encoded = base64.urlsafe_b64encode(s)
    return encoded.strip(b'=')
def b64_decode(s):
    """Inverse of b64_encode: restore the '=' padding, then decode."""
    padding = b'=' * (-len(s) % 4)
    return base64.urlsafe_b64decode(s + padding)
def base64_hmac(salt, value, key):
    """Unpadded url-safe base64 of the salted HMAC digest of *value*."""
    mac = salted_hmac(salt, value, key)
    return b64_encode(mac.digest())
def get_cookie_signer(salt='django.core.signing.get_cookie_signer'):
    """
    Return an instance of the signer class named by settings.SIGNING_BACKEND,
    keyed for cookie signing.

    Raises ImproperlyConfigured if the backend module cannot be imported or
    the class is missing from it.
    """
    modpath = settings.SIGNING_BACKEND
    # Split "package.module.ClassName" into module path and class name.
    module, attr = modpath.rsplit('.', 1)
    try:
        mod = import_module(module)
    except ImportError as e:
        raise ImproperlyConfigured(
            'Error importing cookie signer %s: "%s"' % (modpath, e))
    try:
        Signer = getattr(mod, attr)
    except AttributeError as e:
        raise ImproperlyConfigured(
            'Error importing cookie signer %s: "%s"' % (modpath, e))
    # Namespace the key with 'django.http.cookies' so cookie signatures
    # cannot be replayed in other signing contexts.
    return Signer('django.http.cookies' + settings.SECRET_KEY, salt=salt)
class JSONSerializer(object):
    """
    Thin json adapter exposing the dumps/loads pair expected by
    signing.dumps and signing.loads.
    """
    def dumps(self, obj):
        # Compact separators keep the signed payload as short as possible.
        return json.dumps(obj, separators=(',', ':'))

    def loads(self, data):
        return json.loads(data)
def dumps(obj, key=None, salt='django.core.signing', serializer=JSONSerializer, compress=False):
    """
    Returns URL-safe, sha1 signed base64 compressed JSON string. If key is
    None, settings.SECRET_KEY is used instead.

    If compress is True (not the default) checks if compressing using zlib can
    save some space. Prepends a '.' to signify compression. This is included
    in the signature, to protect against zip bombs.

    Salt can be used to namespace the hash, so that a signed string is
    only valid for a given namespace. Leaving this at the default
    value or re-using a salt value across different parts of your
    application without good cause is a security risk.
    """
    payload = force_bytes(serializer().dumps(obj))
    prefix = b''
    if compress:
        # zlib is only exercised when compression was requested.
        squeezed = zlib.compress(payload)
        # Worth it only if we save at least one byte net of the '.' marker.
        if len(squeezed) < (len(payload) - 1):
            payload = squeezed
            prefix = b'.'
    base64d = prefix + b64_encode(payload)
    return TimestampSigner(key, salt=salt).sign(base64d)
def loads(s, key=None, salt='django.core.signing', serializer=JSONSerializer, max_age=None):
    """
    Reverse of dumps(), raises BadSignature if signature fails.

    If max_age is given, also raises SignatureExpired for signatures older
    than that many seconds.
    """
    # TimestampSigner.unsign always returns unicode but base64 and zlib
    # compression operate on bytes.
    base64d = force_bytes(TimestampSigner(key, salt=salt).unsign(s, max_age=max_age))
    decompress = False
    # Slice rather than index: on Python 3 ``bytes[0]`` is an int, so the
    # previous ``base64d[0] == b'.'`` was always False and compressed
    # payloads were never decompressed.
    if base64d[:1] == b'.':
        # It's compressed; uncompress it first
        base64d = base64d[1:]
        decompress = True
    data = b64_decode(base64d)
    if decompress:
        data = zlib.decompress(data)
    return serializer().loads(force_str(data))
class Signer(object):
    """
    Signs and verifies strings using an HMAC derived from ``key`` and
    ``salt``. Signed values have the form "<value><sep><signature>".
    """

    def __init__(self, key=None, sep=':', salt=None):
        # Use of native strings in all versions of Python
        self.sep = str(sep)
        self.key = str(key or settings.SECRET_KEY)
        default_salt = '%s.%s' % (self.__class__.__module__,
                                  self.__class__.__name__)
        self.salt = str(salt or default_salt)

    def signature(self, value):
        # Convert the signature from bytes to str only on Python 3
        return force_str(base64_hmac(self.salt + 'signer', value, self.key))

    def sign(self, value):
        value = force_str(value)
        return str('%s%s%s') % (value, self.sep, self.signature(value))

    def unsign(self, signed_value):
        signed_value = force_str(signed_value)
        if self.sep not in signed_value:
            raise BadSignature('No "%s" found in value' % self.sep)
        value, sig = signed_value.rsplit(self.sep, 1)
        if not constant_time_compare(sig, self.signature(value)):
            raise BadSignature('Signature "%s" does not match' % sig)
        return force_text(value)
class TimestampSigner(Signer):
    """
    Signer that appends a base62-encoded creation timestamp to the value
    before signing, so unsign() can reject signatures older than max_age.
    """
    def timestamp(self):
        # Whole seconds since the epoch, base62-encoded to stay short and
        # url-safe.
        return baseconv.base62.encode(int(time.time()))
    def sign(self, value):
        value = force_str(value)
        # Append "<sep><timestamp>" first so the timestamp is covered by the
        # HMAC computed in Signer.sign().
        value = str('%s%s%s') % (value, self.sep, self.timestamp())
        return super(TimestampSigner, self).sign(value)
    def unsign(self, value, max_age=None):
        # Signer.unsign() raises BadSignature on tampering; what remains is
        # "<original value><sep><timestamp>".
        result = super(TimestampSigner, self).unsign(value)
        value, timestamp = result.rsplit(self.sep, 1)
        timestamp = baseconv.base62.decode(timestamp)
        if max_age is not None:
            # Check timestamp is not older than max_age
            age = time.time() - timestamp
            if age > max_age:
                raise SignatureExpired(
                    'Signature age %s > %s seconds' % (age, max_age))
        return value
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" tests for supporting multiple NIC's in advanced zone with security groups in cloudstack 4.14.0.0
"""
# Import Local Modules
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase, unittest
from marvin.sshClient import SshClient
from marvin.lib.utils import (validateList,
cleanup_resources,
get_host_credentials,
get_process_status,
execute_command_in_host,
random_gen)
from marvin.lib.base import (PhysicalNetwork,
Account,
Host,
TrafficType,
Domain,
Network,
NetworkOffering,
VirtualMachine,
ServiceOffering,
Zone,
NIC,
SecurityGroup)
from marvin.lib.common import (get_domain,
get_zone,
get_template,
list_virtual_machines,
list_routers,
list_hosts,
get_free_vlan)
from marvin.codes import (PASS, FAILED)
import logging
import random
import time
class TestMulipleNicSupport(cloudstackTestCase):
    @classmethod
    def setUpClass(cls):
        """One-time setup: domain, account, offerings, three SG-enabled
        shared networks and two test VMs (one single-NIC, one dual-NIC)."""
        cls.testClient = super(
            TestMulipleNicSupport,
            cls).getClsTestClient()
        cls.apiclient = cls.testClient.getApiClient()
        cls.testdata = cls.testClient.getParsedTestDataConfig()
        cls.services = cls.testClient.getParsedTestDataConfig()
        zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
        cls.zone = Zone(zone.__dict__)
        cls._cleanup = []
        # These tests only make sense in a security-group-enabled zone;
        # setUp() skips each test when cls.skip is set.
        cls.skip = False
        if str(cls.zone.securitygroupsenabled) != "True":
            cls.skip = True
            return
        cls.logger = logging.getLogger("TestMulipleNicSupport")
        cls.stream_handler = logging.StreamHandler()
        cls.logger.setLevel(logging.DEBUG)
        cls.logger.addHandler(cls.stream_handler)
        # Get Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.services['mode'] = cls.zone.networktype
        # KVM-only template; missing template also causes a class-wide skip
        cls.template = get_template(cls.apiclient, cls.zone.id, hypervisor="KVM")
        if cls.template == FAILED:
            cls.skip = True
            return
        # Create new domain, account, network and VM
        cls.user_domain = Domain.create(
            cls.apiclient,
            services=cls.testdata["acl"]["domain2"],
            parentdomainid=cls.domain.id)
        # Create account
        cls.account1 = Account.create(
            cls.apiclient,
            cls.testdata["acl"]["accountD2"],
            admin=True,
            domainid=cls.user_domain.id
        )
        # Create small service offering
        cls.service_offering = ServiceOffering.create(
            cls.apiclient,
            cls.testdata["service_offerings"]["small"]
        )
        cls._cleanup.append(cls.service_offering)
        cls.services["network"]["zoneid"] = cls.zone.id
        cls.network_offering = NetworkOffering.create(
            cls.apiclient,
            cls.services["network_offering"],
        )
        # Enable Network offering
        cls.network_offering.update(cls.apiclient, state='Enabled')
        cls._cleanup.append(cls.network_offering)
        cls.testdata["virtual_machine"]["zoneid"] = cls.zone.id
        cls.testdata["virtual_machine"]["template"] = cls.template.id
        if cls.zone.securitygroupsenabled:
            # Enable networking for reaching the VM through SSH
            security_group = SecurityGroup.create(
                cls.apiclient,
                cls.testdata["security_group"],
                account=cls.account1.name,
                domainid=cls.account1.domainid
            )
            # Authorize Security group to SSH to VM
            ingress_rule = security_group.authorize(
                cls.apiclient,
                cls.testdata["ingress_rule"],
                account=cls.account1.name,
                domainid=cls.account1.domainid
            )
            # Authorize Security group to allow ICMP to VM
            ingress_rule2 = security_group.authorize(
                cls.apiclient,
                cls.testdata["ingress_rule_ICMP"],
                account=cls.account1.name,
                domainid=cls.account1.domainid
            )
        cls.testdata["shared_network_offering_sg"]["specifyVlan"] = 'True'
        cls.testdata["shared_network_offering_sg"]["specifyIpRanges"] = 'True'
        cls.shared_network_offering = NetworkOffering.create(
            cls.apiclient,
            cls.testdata["shared_network_offering_sg"],
            conservemode=False
        )
        NetworkOffering.update(
            cls.shared_network_offering,
            cls.apiclient,
            id=cls.shared_network_offering.id,
            state="enabled"
        )
        physical_network, vlan = get_free_vlan(cls.apiclient, cls.zone.id)
        cls.testdata["shared_network_sg"]["physicalnetworkid"] = physical_network.id
        # Three shared SG networks on disjoint /24-ish ranges; the random
        # subnet number doubles as the VLAN id and third IP octet.
        random_subnet_number = random.randrange(90, 99)
        cls.testdata["shared_network_sg"]["name"] = "Shared-Network-SG-Test-vlan" + str(random_subnet_number)
        cls.testdata["shared_network_sg"]["displaytext"] = "Shared-Network-SG-Test-vlan" + str(random_subnet_number)
        cls.testdata["shared_network_sg"]["vlan"] = "vlan://" + str(random_subnet_number)
        cls.testdata["shared_network_sg"]["startip"] = "192.168." + str(random_subnet_number) + ".240"
        cls.testdata["shared_network_sg"]["endip"] = "192.168." + str(random_subnet_number) + ".250"
        cls.testdata["shared_network_sg"]["gateway"] = "192.168." + str(random_subnet_number) + ".254"
        cls.network1 = Network.create(
            cls.apiclient,
            cls.testdata["shared_network_sg"],
            networkofferingid=cls.shared_network_offering.id,
            zoneid=cls.zone.id,
            accountid=cls.account1.name,
            domainid=cls.account1.domainid
        )
        random_subnet_number = random.randrange(100, 110)
        cls.testdata["shared_network_sg"]["name"] = "Shared-Network-SG-Test-vlan" + str(random_subnet_number)
        cls.testdata["shared_network_sg"]["displaytext"] = "Shared-Network-SG-Test-vlan" + str(random_subnet_number)
        cls.testdata["shared_network_sg"]["vlan"] = "vlan://" + str(random_subnet_number)
        cls.testdata["shared_network_sg"]["startip"] = "192.168." + str(random_subnet_number) + ".240"
        cls.testdata["shared_network_sg"]["endip"] = "192.168." + str(random_subnet_number) + ".250"
        cls.testdata["shared_network_sg"]["gateway"] = "192.168." + str(random_subnet_number) + ".254"
        cls.network2 = Network.create(
            cls.apiclient,
            cls.testdata["shared_network_sg"],
            networkofferingid=cls.shared_network_offering.id,
            zoneid=cls.zone.id,
            accountid=cls.account1.name,
            domainid=cls.account1.domainid
        )
        random_subnet_number = random.randrange(111, 120)
        cls.testdata["shared_network_sg"]["name"] = "Shared-Network-SG-Test-vlan" + str(random_subnet_number)
        cls.testdata["shared_network_sg"]["displaytext"] = "Shared-Network-SG-Test-vlan" + str(random_subnet_number)
        cls.testdata["shared_network_sg"]["vlan"] = "vlan://" + str(random_subnet_number)
        cls.testdata["shared_network_sg"]["startip"] = "192.168." + str(random_subnet_number) + ".240"
        cls.testdata["shared_network_sg"]["endip"] = "192.168." + str(random_subnet_number) + ".250"
        cls.testdata["shared_network_sg"]["gateway"] = "192.168." + str(random_subnet_number) + ".254"
        cls.network3 = Network.create(
            cls.apiclient,
            cls.testdata["shared_network_sg"],
            networkofferingid=cls.shared_network_offering.id,
            zoneid=cls.zone.id,
            accountid=cls.account1.name,
            domainid=cls.account1.domainid
        )
        # VM1: single NIC on network1
        try:
            cls.virtual_machine1 = VirtualMachine.create(
                cls.apiclient,
                cls.testdata["virtual_machine"],
                accountid=cls.account1.name,
                domainid=cls.account1.domainid,
                serviceofferingid=cls.service_offering.id,
                templateid=cls.template.id,
                securitygroupids=[security_group.id],
                networkids=cls.network1.id
            )
            for nic in cls.virtual_machine1.nic:
                if nic.isdefault:
                    cls.virtual_machine1.ssh_ip = nic.ipaddress
                    cls.virtual_machine1.default_network_id = nic.networkid
                    break
        except Exception as e:
            # NOTE(review): cls.fail is unittest's instance method accessed on
            # the class — calling it here likely misbehaves; verify intent.
            cls.fail("Exception while deploying virtual machine: %s" % e)
        # VM2: two NICs, one on network1 and one on network2
        try:
            cls.virtual_machine2 = VirtualMachine.create(
                cls.apiclient,
                cls.testdata["virtual_machine"],
                accountid=cls.account1.name,
                domainid=cls.account1.domainid,
                serviceofferingid=cls.service_offering.id,
                templateid=cls.template.id,
                securitygroupids=[security_group.id],
                networkids=[str(cls.network1.id), str(cls.network2.id)]
            )
            for nic in cls.virtual_machine2.nic:
                if nic.isdefault:
                    cls.virtual_machine2.ssh_ip = nic.ipaddress
                    cls.virtual_machine2.default_network_id = nic.networkid
                    break
        except Exception as e:
            cls.fail("Exception while deploying virtual machine: %s" % e)
        # Cleanup order: VMs first, then networks/offerings, then account/domain
        cls._cleanup.append(cls.virtual_machine1)
        cls._cleanup.append(cls.virtual_machine2)
        cls._cleanup.append(cls.network1)
        cls._cleanup.append(cls.network2)
        cls._cleanup.append(cls.network3)
        cls._cleanup.append(cls.shared_network_offering)
        if cls.zone.securitygroupsenabled:
            cls._cleanup.append(security_group)
        cls._cleanup.append(cls.account1)
        cls._cleanup.append(cls.user_domain)
@classmethod
def tearDownClass(self):
try:
cleanup_resources(self.apiclient, self._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
if self.skip:
self.skipTest("Test can be run only on advanced zone and KVM hypervisor")
self.apiclient = self.testClient.getApiClient()
self.cleanup = []
return
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def verify_network_rules(self, vm_id):
virtual_machine = VirtualMachine.list(
self.apiclient,
id=vm_id
)
vm = virtual_machine[0]
hosts = list_hosts(
self.apiclient,
id=vm.hostid
)
host = hosts[0]
if host.hypervisor.lower() not in "kvm":
return
host.user, host.password = get_host_credentials(self.config, host.ipaddress)
for nic in vm.nic:
secips = ""
if len(nic.secondaryip) > 0:
for secip in nic.secondaryip:
secips += secip.ipaddress + ";"
command="/usr/share/cloudstack-common/scripts/vm/network/security_group.py verify_network_rules --vmname %s --vmip %s --vmmac %s --nicsecips '%s'" % (vm.instancename, nic.ipaddress, nic.macaddress, secips)
self.logger.debug("Executing command '%s' in host %s" % (command, host.ipaddress))
result=execute_command_in_host(host.ipaddress, 22,
host.user,
host.password,
command)
if len(result) > 0:
self.fail("The iptables/ebtables rules for nic %s on vm %s on host %s are not correct" %(nic.ipaddress, vm.instancename, host.name))
@attr(tags=["adeancedsg"], required_hardware="false")
def test_01_create_vm_with_multiple_nics(self):
"""Create Vm with multiple NIC's
Steps:
# 1. Create more than 1 isolated or shared network
# 2. Create a vm and select more than 1 network while deploying
# 3. Vm is deployed successfully with 1 nic from each network
# 4. All the vm's should be pingable
:return:
"""
virtual_machine = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine2.id
)
self.assertEqual(
len(virtual_machine), 1,
"Virtual Machine create with 2 NIC's failed")
nicIdInVm = virtual_machine[0].nic[0]
self.assertIsNotNone(nicIdInVm, "NIC 1 not found in Virtual Machine")
nicIdInVm = virtual_machine[0].nic[1]
self.assertIsNotNone(nicIdInVm, "NIC 2 not found in Virtual Machine")
self.verify_network_rules(self.virtual_machine2.id)
@attr(tags=["advancedsg"], required_hardware="false")
def test_02_add_nic_to_vm(self):
"""Create VM with single NIC and then add additional NIC
Steps:
# 1. Create a VM by selecting one default NIC
# 2. Create few more isolated or shared networks
# 3. Add extra NIC's to the vm from the newly created networks
# 4. The deployed VM should have extra nic's added in the above
# step without any fail
# 5. The IP's of the extra NIC's should be pingable
:return:
"""
self.virtual_machine1.add_nic(self.apiclient, self.network2.id)
virtual_machine = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine1.id
)
nicIdInVm = virtual_machine[0].nic[1]
self.assertIsNotNone(nicIdInVm, "Second NIC not found")
self.verify_network_rules(self.virtual_machine1.id)
@attr(tags=["advancedsg"], required_hardware="false")
def test_03_add_ip_to_default_nic(self):
    """Acquire a secondary IP on the VM's default NIC.

    Steps:
    # 1. Create a VM with more than 1 NIC
    # 2. Navigate to Instances->NIC->Edit Secondary IP's
    #    ->Acquire new Secondary IP
    # 3. Add as many secondary IPs as possible to the VM
    # 4. Configure the secondary IP's ("Action Item" section)
    :return:
    """
    default_nic_id = self.virtual_machine2.nic[0].id
    secondary_ip = NIC.addIp(
        self.apiclient,
        id=default_nic_id
    )
    self.assertIsNotNone(
        secondary_ip,
        "Unable to add secondary IP to the default NIC")
    self.verify_network_rules(self.virtual_machine2.id)
@attr(tags=["advancedsg"], required_hardware="false")
def test_04_add_ip_to_remaining_nics(self):
    """Add secondary IPs to all non-default NICs.

    Steps:
    # 1) Create a VM with more than 1 NIC
    # 2) Navigate to Instances-NIC's->Edit Secondary IP's
    #    ->Acquire new Secondary IP
    # 3) Add secondary IP to all the NIC's of the VM
    # 4) Configure the secondary IP's ("Action Item" section)
    :return:
    """
    self.virtual_machine1.add_nic(self.apiclient, self.network3.id)
    vms = VirtualMachine.list(
        self.apiclient,
        id=self.virtual_machine1.id
    )
    # Guard with a length check: the old code indexed vms[0].nic[2]
    # directly, so a missing third NIC raised IndexError before
    # assertIsNotNone could report a readable failure.
    self.assertTrue(
        len(vms[0].nic) > 2,
        "Third NIC is not added successfully to the VM")
    vms1_nic1_id = vms[0].nic[1]['id']
    vms1_nic2_id = vms[0].nic[2]['id']
    # Two secondary IPs on each extra NIC; the four copy-pasted
    # addIp/assert pairs are collapsed into one loop.
    for nic_id, nic_label in ((vms1_nic1_id, "second"),
                              (vms1_nic2_id, "third")):
        for ordinal in ("first", "second"):
            ipaddress = NIC.addIp(
                self.apiclient,
                id=nic_id
            )
            self.assertIsNotNone(
                ipaddress,
                "Unable to add %s secondary IP to the %s NIC" %
                (ordinal, nic_label))
    self.verify_network_rules(self.virtual_machine1.id)
@attr(tags=["advancedsg"], required_hardware="false")
def test_05_stop_start_vm_with_multiple_nic(self):
    """Stop and start a VM with multiple NICs.

    Steps:
    # 1) Create a Vm with multiple NIC's
    # 2) Configure secondary IP's on the VM
    # 3) Try to stop/start the VM
    # 4) Ping the IP's of the vm
    # 5) Remove Secondary IP from one of the NIC
    :return:
    """
    # The addIp results were previously assigned and never checked;
    # assert on them so a failed allocation is caught immediately.
    ipaddress1 = NIC.addIp(
        self.apiclient,
        id=self.virtual_machine2.nic[0].id
    )
    ipaddress2 = NIC.addIp(
        self.apiclient,
        id=self.virtual_machine2.nic[1].id
    )
    self.assertIsNotNone(
        ipaddress1, "Unable to add secondary IP to the first NIC")
    self.assertIsNotNone(
        ipaddress2, "Unable to add secondary IP to the second NIC")
    # Stop the VM with multiple NIC's
    self.virtual_machine2.stop(self.apiclient)
    virtual_machine = VirtualMachine.list(
        self.apiclient,
        id=self.virtual_machine2.id
    )
    self.assertEqual(
        virtual_machine[0]['state'], 'Stopped',
        "Could not stop the VM with multiple NIC's")
    # assertEqual above already failed the test unless the VM is Stopped,
    # so the old nested "if state == 'Stopped'" check was redundant.
    self.virtual_machine2.start(self.apiclient)
    virtual_machine = VirtualMachine.list(
        self.apiclient,
        id=self.virtual_machine2.id
    )
    self.assertEqual(
        virtual_machine[0]['state'], 'Running',
        "Could not start the VM with multiple NIC's")
    self.verify_network_rules(self.virtual_machine2.id)
@attr(tags=["advancedsg"], required_hardware="false")
def test_06_migrate_vm_with_multiple_nic(self):
    """Migrate a VM with multiple NICs.

    Steps:
    # 1) Create a Vm with multiple NIC's
    # 2) Configure secondary IP's on the VM
    # 3) Try to stop/start the VM
    # 4) Ping the IP's of the vm
    :return:
    """
    # Skipping adding Secondary IP to NIC since its already
    # done in the previous test cases
    virtual_machine = VirtualMachine.list(
        self.apiclient,
        id=self.virtual_machine1.id
    )
    old_host_id = virtual_machine[0]['hostid']
    try:
        hosts = Host.list(
            self.apiclient,
            virtualmachineid=self.virtual_machine1.id,
            listall=True)
        self.assertEqual(
            validateList(hosts)[0],
            PASS,
            "hosts list validation failed")
        # Pick the first host different from the current one.  The old
        # loop left host_id unbound when every listed host equalled
        # old_host_id, turning that situation into a confusing NameError
        # instead of a clear failure message.
        host_id = None
        for host in hosts:
            if host.id != old_host_id:
                host_id = host.id
                break
        self.assertIsNotNone(
            host_id, "No suitable host found to migrate the VM to")
        self.virtual_machine1.migrate(self.apiclient, host_id)
    except Exception as e:
        self.fail("Exception occured: %s" % e)
    # List the vm again
    virtual_machine = VirtualMachine.list(
        self.apiclient,
        id=self.virtual_machine1.id)
    new_host_id = virtual_machine[0]['hostid']
    self.assertNotEqual(
        old_host_id, new_host_id,
        "Migration of VM to new host failed"
    )
    self.verify_network_rules(self.virtual_machine1.id)
@attr(tags=["advancedsg"], required_hardware="false")
def test_07_remove_secondary_ip_from_nic(self):
    """Release every secondary IP configured on a NIC.

    Steps:
    # 1) Navigate to Instances
    # 2) Select any vm
    # 3) NIC's ->Edit secondary IP's->Release IP
    # 4) The secondary IP should be successfully removed
    """
    vm_list = VirtualMachine.list(
        self.apiclient,
        id=self.virtual_machine2.id)
    # Release each secondary IP held by the second NIC.
    for sec_ip in vm_list[0].nic[1].secondaryip:
        NIC.removeIp(self.apiclient, ipaddressid=sec_ip['id'])
    # Re-list and confirm the NIC no longer carries secondary IPs.
    vm_list = VirtualMachine.list(
        self.apiclient,
        id=self.virtual_machine2.id
    )
    self.assertFalse(
        vm_list[0].nic[1].secondaryip,
        'Failed to remove secondary IP')
    self.verify_network_rules(self.virtual_machine2.id)
@attr(tags=["advancedsg"], required_hardware="false")
def test_08_remove_nic_from_vm(self):
    """Remove every non-default NIC from the VM.

    Steps:
    # 1) Navigate to Instances->select any vm->NIC's->NIC 2
    #    ->Click on "X" button to remove the second NIC
    # 2) Remove other NIC's as well from the VM
    # 3) All the NIC's should be successfully removed from the VM
    :return:
    """
    vm_list = VirtualMachine.list(
        self.apiclient,
        id=self.virtual_machine2.id)
    # Only the default NIC must survive; detach everything else.
    extra_nics = [nic for nic in vm_list[0].nic if not nic.isdefault]
    for nic in extra_nics:
        self.virtual_machine2.remove_nic(self.apiclient, nic.id)
    vm_list = VirtualMachine.list(
        self.apiclient,
        id=self.virtual_machine2.id)
    self.assertEqual(
        len(vm_list[0].nic), 1,
        "Failed to remove all the nics from the virtual machine")
    self.verify_network_rules(self.virtual_machine2.id)
@attr(tags=["advancedsg"], required_hardware="false")
def test_09_reboot_vm_with_multiple_nic(self):
    """Reboot a VM with multiple NICs.

    Steps:
    # 1) Create a Vm with multiple NIC's
    # 2) Configure secondary IP's on the VM
    # 3) Try to reboot the VM
    # 4) Ping the IP's of the vm
    :return:
    """
    # Skipping adding Secondary IP to NIC since its already
    # done in the previous test cases
    # The list result was previously an unused local; assert on it so
    # the API round-trip actually validates the VM exists before reboot.
    virtual_machine = VirtualMachine.list(
        self.apiclient,
        id=self.virtual_machine1.id
    )
    self.assertEqual(
        len(virtual_machine), 1,
        "Unable to find the VM before reboot")
    try:
        self.virtual_machine1.reboot(self.apiclient)
    except Exception as e:
        self.fail("Exception occured: %s" % e)
    self.verify_network_rules(self.virtual_machine1.id)
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#This is the ultimate statistical tool!
#Calculates pretty much anything you can think of.
#Dependency: Scipy & Numpy
#2008, Sami-Matias Niemi
def process_args():
    """Build the command-line parser and return (options, args).

    The option order is preserved so --help output stays identical.
    """
    from optparse import OptionParser
    opt_parser = OptionParser()
    # (short, long, dest, help, metavar) for the plain value options;
    # -v is added in place because it is a flag, not a value option.
    value_options = [
        ("-o", "--output", "output",
         "Writes data to file named filename. This is optional choice.",
         "filename"),
        ("-i", "--input", "input",
         "Reads data from input file called filename",
         "filename"),
    ]
    flag_option = ("-v", "--verbose", "verbose",
                   "Verbose mode on. This is optional choice.")
    tail_options = [
        ("-s", "--separator", "delim",
         "Sets the data separator/delimiter to given char. This is optional choice.",
         "separator"),
        ("-c", "--column1", "column1",
         "The column1 to be processed. Numbering begins from 0.",
         "number"),
        ("-x", "--column2", "column2",
         "The column2 to be processed. Numbering begins from 0. This is optional choice!",
         "number"),
    ]
    for short_name, long_name, dest, help_text, metavar in value_options:
        opt_parser.add_option(short_name, long_name, dest=dest,
                              help=help_text, metavar=metavar)
    opt_parser.add_option(flag_option[0], flag_option[1], action="store_true",
                          dest=flag_option[2], help=flag_option[3])
    for short_name, long_name, dest, help_text, metavar in tail_options:
        opt_parser.add_option(short_name, long_name, dest=dest,
                              help=help_text, metavar=metavar)
    return opt_parser.parse_args()
if __name__ == '__main__':
from scipy.stats import *
#from numpy import * #Maybe not needed?
import numpy as N
import sys
(opts, args) = process_args()
if (opts.input is None):
print "\nYou did not give input file!\nWill exit now!\n"
sys.exit()
if (opts.column1 is None):
print "You did not give the column!\nWill exit now!\n"
sys.exit()
if opts.delim is None: alldata = N.loadtxt(opts.input, delimiter=" ", skiprows=0)
else: alldata = N.loadtxt(opts.input, delimiter="%s" % opts.delim, skiprows=0)
if int(opts.column1) != 0: data1 = alldata[:,int(opts.column1)]
else: data1 = alldata
if (opts.column2 is None):
#Only one column statistics here
count1 = len(data1)
mean1 = N.mean(data1)
median1 = N.median(data1)
mode1 = mode(data1)
min1 = N.min(data1)
max1 = N.max(data1)
sum1 = N.sum(data1)
gmean1 = gmean(data1)
hmean1 = hmean(data1)
kurtosis1 = kurtosis(data1)
kurtosisp1 = kurtosis(data1, fisher = False)
kurtosist1 = kurtosistest(data1)
moment1 = moment(data1)
normaltest1 = normaltest(data1)
sem1 = sem(data1)
skew1 = skew(data1)
skewt1 = skewtest(data1)
sos1 = square_of_sums(data1)
ss1 = ss(data1)
std1 = std(data1)
stderr1 = stderr(data1)
var1 = var(data1)
moment11 = moment(data1, moment=1)
moment12 = moment(data1, moment=2)
moment13 = moment(data1, moment=3)
per125 = scoreatpercentile(data1, 25)
per175 = scoreatpercentile(data1, 75)
else:
#Two column statistics here
data2 = alldata[:,int(opts.column2)]
count1 = len(data1)
mean1 = N.mean(data1)
median1 = N.median(data1)
mode1 = mode(data1)
min1 = N.min(data1)
max1 = N.max(data1)
sum1 = N.sum(data1)
gmean1 = gmean(data1)
hmean1 = hmean(data1)
kurtosis1 = kurtosis(data1)
kurtosisp1 = kurtosis(data1, fisher = False)
kurtosist1 = kurtosistest(data1)
moment11 = moment(data1, moment=1)
moment12 = moment(data1, moment=2)
moment13 = moment(data1, moment=3)
per125 = scoreatpercentile(data1, 25)
per175 = scoreatpercentile(data1, 75)
normaltest1 = normaltest(data1)
sem1 = sem(data1)
skew1 = skew(data1)
skewt1 = skewtest(data1)
sos1 = square_of_sums(data1)
ss1 = ss(data1)
std1 = N.std(data1)
stderr1 = stderr(data1)
var1 = N.var(data1)
count2 = len(data2)
mean2 = N.mean(data2)
median2 = N.median(data2)
mode2 = mode(data2)
min2 = N.min(data2)
max2 = N.max(data2)
sum2 = N.sum(data2)
gmean2 = gmean(data2)
hmean2 = hmean(data2)
kurtosis2 = kurtosis(data2)
kurtosisp2 = kurtosis(data2, fisher = False)
kurtosist2 = kurtosistest(data2)
moment2 = moment(data2)
normaltest2 = normaltest(data2)
sem2 = sem(data2)
skew2 = skew(data2)
skewt2 = skewtest(data2)
sos2 = square_of_sums(data2)
ss2 = ss(data2)
std2 = N.std(data2)
stderr2 = stderr(data2)
var2 = N.var(data2)
moment21 = moment(data2, moment=1)
moment22 = moment(data2, moment=2)
moment23 = moment(data2, moment=3)
per225 = scoreatpercentile(data2, 25)
per275 = scoreatpercentile(data2, 75)
#tests between the two datasets
corrcoef = corrcoef(data1,data2)
kendalltau = kendalltau(data1,data2)
ks = ks_2samp(data1,data2)
mannw = mannwhitneyu(data1,data2)
pearsonr = pearsonr(data1,data2)
spearmanr = spearmanr(data1,data2)
#outputs
if opts.verbose == True:
print "The verbose output!"
if (opts.column2 is None):
print "Statistics of column %i" % int(opts.column1)
print "Count: %i" % count1
print "Mean: %f" % mean1
print "Harmonic mean: %f" % hmean1
print "Geometric mean: %f" % gmean1
print "Median: %f" % median1
print "Mode (modal value, counts): %f, %i" % (mode1[0], mode1[1])
print "Min: %f" % min1
print "Max: %f" % max1
print "Standard deviation: %f" % std1
print "Standard deviation error: %f" % stderr1
print "Variance: %f" % var1
print "Kurtosis (Fisher): %f" % kurtosis1
print "Kurtosis (Pearson): %f" % kurtosisp1
print "Kurtosis test (Z-score, 2-tail Z-probability): %f, %s" % (kurtosist1[0], kurtosist1[1])
print "Skewness: %f " % skew1
print "Skewness test (Z-score, 2-tail Z-probability): %f, %s" % (skewt1[0], skewt1[1])
print "Normality test (Chi**2 score,2-tail probability): %f, %s" % (normaltest1[0], normaltest1[1])
print "Standard error of mean: %f" % sem1
print "Sum: %f" % sum1
print "Square of Sums: %f" % sos1
print "Sums of Squares: %f" % ss1
print "1st moment: %f" % moment11
print "2nd moment: %f" % moment12
print "3rd moment: %f" % moment13
print "25th (1st quartile) percentile: %f" % per125
print "75th (3rd quartile) percentile: %f" % per175
else:
print "Statistics of column %i" % int(opts.column1)
print "Count: %i" % count1
print "Mean: %f" % mean1
print "Harmonic mean: %f" % hmean1
print "Geometric mean: %f" % gmean1
print "Median: %f" % median1
print "Mode (modal value, counts): %f, %i" % (mode1[0], mode1[1])
print "Min: %f" % min1
print "Max: %f" % max1
print "Standard deviation: %f" % std1
print "Standard deviation error: %f" % stderr1
print "Variance: %f" % var1
print "Kurtosis (Fisher): %f" % kurtosis1
print "Kurtosis (Pearson): %f" % kurtosisp1
print "Kurtosis test (Z-score, 2-tail Z-probability): %f, %s" % (kurtosist1[0], kurtosist1[1])
print "Skewness: %f " % skew1
print "Skewness test (Z-score, 2-tail Z-probability): %f, %s" % (skewt1[0], skewt1[1])
print "Normality test (Chi**2 score,2-tail probability): %f, %s" % (normaltest1[0], normaltest1[1])
print "Standard error of mean: %f" % sem1
print "Sum: %f" % sum1
print "Square of Sums: %f" % sos1
print "Sums of Squares: %f" % ss1
print "1st moment: %f" % moment11
print "2nd moment: %f" % moment12
print "3rd moment: %f" % moment13
print "25th (1st quartile) percentile: %f" % per125
print "75th (3rd quartile) percentile: %f" % per175
print
print "Statistics of column %i" % int(opts.column2)
print "Count: %i" % count2
print "Mean: %f" % mean2
print "Harmonic mean: %f" % hmean2
print "Geometric mean: %f" % gmean2
print "Median: %f" % median2
print "Mode (modal value, counts): %f, %i" % (mode2[0], mode2[1])
print "Min: %f" % min2
print "Max: %f" % max2
print "Standard deviation: %f" % std2
print "Standard deviation error: %f" % stderr2
print "Variance: %f" % var2
print "Kurtosis (Fisher): %f" % kurtosis2
print "Kurtosis (Pearson): %f" % kurtosisp2
print "Kurtosis test (Z-score, 2-tail Z-probability): %f, %s" % (kurtosist2[0], kurtosist2[1])
print "Skewness: %f " % skew2
print "Skewness test (Z-score, 2-tail Z-probability): %f, %s" % (skewt2[0], skewt1[1])
print "Normality test (Chi**2 score,2-tail probability): %f, %s" % (normaltest2[0], normaltest2[1])
print "Standard error of mean: %f" % sem2
print "Sum: %f" % sum2
print "Square of Sums: %f" % sos2
print "Sum of Squares: %f" % ss2
print "1st moment: %f" % moment21
print "2nd moment: %f" % moment22
print "3rd moment: %f" % moment23
print "25th (1st quartile) percentile: %f" % per225
print "75th (3rd quartile) percentile: %f" % per275
print "Statistical tests between columns %i and %i" % (int(opts.column1), int(opts.column2))
print "Correlation Coefficients:\n %f, %f, %f, %f" % (corrcoef[0,0], corrcoef[0,1], corrcoef[1,0], corrcoef[1,1])
print "Kendall's tau (Kendall's tau, two-tailed p-value):\n %f, %s" % (kendalltau[0],kendalltau[1])
print "Kolmogorov-Smirnov (KS D-value, p-value):\n %f, %s" % (ks[0],ks[1])
print "Mann-Whitney U (u-statistic, one-tailed p-value (i.e., p(z(U)))):\n %f, %s" % (mannw[0], mannw[1])
print "Pearson Correlation Coefficient (Pearson's correlation coefficient, 2-tailed p-value):\n %f, %s" % (pearsonr[0], pearsonr[1])
print "Spearman rank-order Correlation Coefficient (Spearman correlation coefficient, 2-tailed p-value):\n %f, %s" % (spearmanr[0], spearmanr[1])
if (opts.output is not None):
file = open(opts.output, 'w')
if (opts.column2 is None):
file.write("Statistics of column %i \n" % int(opts.column1))
file.write("Count: %i \n" % count1)
file.write("Mean: %f \n" % mean1)
file.write("Harmonic mean: %f \n" % hmean1)
file.write("Geometric mean: %f \n" % gmean1)
file.write("Median: %f \n" % median1)
file.write("Mode (modal value, counts): %f, %i \n" % (mode1[0], mode1[1]))
file.write("Min: %f \n" % min1)
file.write("Max: %f \n" % max1)
file.write("Standard deviation: %f \n" % std1)
file.write("Standard deviation error: %f \n" % stderr1)
file.write("Variance: %f \n" % var1)
file.write("Kurtosis (Fisher): %f \n" % kurtosis1)
file.write("Kurtosis (Pearson): %f \n" % kurtosisp1)
file.write("Kurtosis test (Z-score, 2-tail Z-probability): %f, %s \n" % (kurtosist1[0], kurtosist1[1]))
file.write("Skewness: %f \n" % skew1)
file.write("Skewness test (Z-score, 2-tail Z-probability): %f, %s \n" % (skewt1[0], skewt1[1]))
file.write("Normality test (Chi**2 score,2-tail probability): %f, %s \n" % (normaltest1[0], normaltest1[1]))
file.write("Standard error of mean: %f \n" % sem1)
file.write("Sum: %f \n" % sum1)
file.write("Square of Sums: %f \n" % sos1)
file.write("Sums of Squares: %f \n" % ss1)
file.write("1st moment: %f \n" % moment11)
file.write("2nd moment: %f \n" % moment12)
file.write("3rd moment: %f \n" % moment13)
file.write("25th (1st quartile) percentile: %f\n" % per125)
file.write("75th (3rd quartile) percentile: %f\n" % per175)
else:
file.write("Statistics of column %i \n" % int(opts.column1))
file.write("Count: %i \n" % count1)
file.write("Mean: %f \n" % mean1)
file.write("Harmonic mean: %f \n" % hmean1)
file.write("Geometric mean: %f \n" % gmean1)
file.write("Median: %f \n" % median1)
file.write("Mode (modal value, counts): %f, %i \n" % (mode1[0], mode1[1]))
file.write("Min: %f \n" % min1)
file.write("Max: %f \n" % max1)
file.write("Standard deviation: %f \n" % std1)
file.write("Standard deviation error: %f \n" % stderr1)
file.write("Variance: %f \n" % var1)
file.write("Kurtosis (Fisher): %f \n" % kurtosis1)
file.write("Kurtosis (Pearson): %f \n" % kurtosisp1)
file.write("Kurtosis test (Z-score, 2-tail Z-probability): %f, %s \n" % (kurtosist1[0], kurtosist1[1]))
file.write("Skewness: %f \n" % skew1)
file.write("Skewness test (Z-score, 2-tail Z-probability): %f, %s \n" % (skewt1[0], skewt1[1]))
file.write("Normality test (Chi**2 score,2-tail probability): %f, %s \n" % (normaltest1[0], normaltest1[1]))
file.write("Standard error of mean: %f \n" % sem1)
file.write("Sum: %f \n" % sum1)
file.write("Square of Sums: %f \n" % sos1)
file.write("Sums of Squares: %f \n" % ss1)
file.write("1st moment: %f \n" % moment11)
file.write("2nd moment: %f \n" % moment12)
file.write("3rd moment: %f \n" % moment13)
file.write("25th (1st quartile) percentile: %f\n" % per125)
file.write("75th (3rd quartile) percentile: %f\n" % per175)
file.write("\n")
file.write("Statistics of column %i \n" % int(opts.column2))
file.write("Count: %i \n" % count2)
file.write("Mean: %f \n" % mean2)
file.write("Harmonic mean: %f \n" % hmean2)
file.write("Geometric mean: %f \n" % gmean2)
file.write("Median: %f \n" % median2)
file.write("Mode (modal value, counts): %f, %i \n" % (mode2[0], mode2[1]))
file.write("Min: %f \n" % min2)
file.write("Max: %f \n" % max2)
file.write("Standard deviation: %f \n" % std2)
file.write("Standard deviation error: %f \n" % stderr2)
file.write("Variance: %f \n" % var2)
file.write("Kurtosis (Fisher): %f \n" % kurtosis2)
file.write("Kurtosis (Pearson): %f \n" % kurtosisp2)
file.write("Kurtosis test (Z-score, 2-tail Z-probability): %f, %s \n" % (kurtosist2[0], kurtosist2[1]))
file.write("Skewness: %f \n" % skew2)
file.write("Skewness test (Z-score, 2-tail Z-probability): %f, %s \n" % (skewt2[0], skewt1[1]))
file.write("Normality test (Chi**2 score,2-tail probability): %f, %s \n" % (normaltest2[0], normaltest2[1]))
file.write("Standard error of mean: %f \n" % sem2)
file.write("Sum: %f \n" % sum2)
file.write("Square of Sums: %f \n" % sos2)
file.write("Sum of Squares: %f \n" % ss2)
file.write("1st moment: %f \n" % moment21)
file.write("2nd moment: %f \n" % moment22)
file.write("3rd moment: %f \n" % moment23)
file.write("25th (1st quartile) percentile: %f\n" % per225)
file.write("75th (3rd quartile) percentile: %f\n" % per275)
file.write("Statistical tests between columns %i and %i \n" % (int(opts.column1), int(opts.column2)))
file.write("Correlation Coefficients:\n %f, %f, %f, %f \n" % (corrcoef[0,0], corrcoef[0,1], corrcoef[1,0], corrcoef[1,1]))
file.write("Kendall's tau (Kendall's tau, two-tailed p-value):\n %f, %s \n" % (kendalltau[0],kendalltau[1]))
file.write("Kolmogorov-Smirnov (KS D-value, p-value):\n %f, %s \n" % (ks[0],ks[1]))
file.write("Mann-Whitney U (u-statistic, one-tailed p-value (i.e., p(z(U)))):\n %f, %s \n" % (mannw[0], mannw[1]))
file.write("Pearson Correlation Coefficient (Pearson's correlation coefficient, 2-tailed p-value):\n %f, %s \n" % (pearsonr[0], pearsonr[1]))
file.write("Spearman rank-order Correlation Coefficient (Spearman correlation coefficient, 2-tailed p-value):\n %f, %s \n" % (spearmanr[0], spearmanr[1]))
file.flush()
file.close()
#END
| |
"""The test for the bayesian sensor platform."""
import unittest
from homeassistant.setup import setup_component
from homeassistant.components.binary_sensor import bayesian
from tests.common import get_test_home_assistant
class TestBayesianBinarySensor(unittest.TestCase):
    """Test the threshold sensor."""
    # Each test runs against a fresh synchronous Home Assistant instance;
    # state changes only propagate to the sensor after block_till_done().

    def setup_method(self, method):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()

    def teardown_method(self, method):
        """Stop everything that was started."""
        self.hass.stop()

    def test_sensor_numeric_state(self):
        """Test sensor on numeric state platform observations."""
        # Two numeric_state observations; the first has no prob_given_false,
        # so the platform's default for it applies (0.4 per the expected
        # observation attributes asserted below).
        config = {
            'binary_sensor': {
                'platform': 'bayesian',
                'name': 'Test_Binary',
                'observations': [{
                    'platform': 'numeric_state',
                    'entity_id': 'sensor.test_monitored',
                    'below': 10,
                    'above': 5,
                    'prob_given_true': 0.6
                }, {
                    'platform': 'numeric_state',
                    'entity_id': 'sensor.test_monitored1',
                    'below': 7,
                    'above': 5,
                    'prob_given_true': 0.9,
                    'prob_given_false': 0.1
                }],
                'prior': 0.2,
            }
        }
        assert setup_component(self.hass, 'binary_sensor', config)
        # 4 is outside both (above, below) windows -> no active observations,
        # probability stays at the prior, sensor off.
        self.hass.states.set('sensor.test_monitored', 4)
        self.hass.block_till_done()
        state = self.hass.states.get('binary_sensor.test_binary')
        self.assertEqual([], state.attributes.get('observations'))
        self.assertEqual(0.2, state.attributes.get('probability'))
        assert state.state == 'off'
        # Toggle in and out of range, then land both monitored sensors at 6
        # so both observations are active simultaneously.
        self.hass.states.set('sensor.test_monitored', 6)
        self.hass.block_till_done()
        self.hass.states.set('sensor.test_monitored', 4)
        self.hass.block_till_done()
        self.hass.states.set('sensor.test_monitored', 6)
        self.hass.states.set('sensor.test_monitored1', 6)
        self.hass.block_till_done()
        state = self.hass.states.get('binary_sensor.test_binary')
        self.assertEqual([{
            'prob_false': 0.4,
            'prob_true': 0.6
        }, {
            'prob_false': 0.1,
            'prob_true': 0.9
        }], state.attributes.get('observations'))
        self.assertAlmostEqual(0.77, state.attributes.get('probability'))
        assert state.state == 'on'
        # Drive both sensors back out of range: probability returns to prior.
        self.hass.states.set('sensor.test_monitored', 6)
        self.hass.states.set('sensor.test_monitored1', 0)
        self.hass.block_till_done()
        self.hass.states.set('sensor.test_monitored', 4)
        self.hass.block_till_done()
        state = self.hass.states.get('binary_sensor.test_binary')
        self.assertEqual(0.2, state.attributes.get('probability'))
        assert state.state == 'off'
        # Above the 'below' bound also counts as out of range.
        self.hass.states.set('sensor.test_monitored', 15)
        self.hass.block_till_done()
        state = self.hass.states.get('binary_sensor.test_binary')
        assert state.state == 'off'

    def test_sensor_state(self):
        """Test sensor on state platform observations."""
        # Single 'state' observation triggered when the monitored entity
        # reads 'off'; threshold 0.32 sits just below the posterior 0.33.
        config = {
            'binary_sensor': {
                'name': 'Test_Binary',
                'platform': 'bayesian',
                'observations': [{
                    'platform': 'state',
                    'entity_id': 'sensor.test_monitored',
                    'to_state': 'off',
                    'prob_given_true': 0.8,
                    'prob_given_false': 0.4
                }],
                'prior': 0.2,
                'probability_threshold': 0.32,
            }
        }
        assert setup_component(self.hass, 'binary_sensor', config)
        # Monitored entity 'on' -> observation inactive, prior applies.
        self.hass.states.set('sensor.test_monitored', 'on')
        state = self.hass.states.get('binary_sensor.test_binary')
        self.assertEqual([], state.attributes.get('observations'))
        self.assertEqual(0.2, state.attributes.get('probability'))
        assert state.state == 'off'
        # Flip off/on/off; final 'off' activates the observation.
        self.hass.states.set('sensor.test_monitored', 'off')
        self.hass.block_till_done()
        self.hass.states.set('sensor.test_monitored', 'on')
        self.hass.block_till_done()
        self.hass.states.set('sensor.test_monitored', 'off')
        self.hass.block_till_done()
        state = self.hass.states.get('binary_sensor.test_binary')
        self.assertEqual([{
            'prob_true': 0.8,
            'prob_false': 0.4
        }], state.attributes.get('observations'))
        self.assertAlmostEqual(0.33, state.attributes.get('probability'))
        assert state.state == 'on'
        # Ending back on 'on' deactivates it again -> prior, sensor off.
        self.hass.states.set('sensor.test_monitored', 'off')
        self.hass.block_till_done()
        self.hass.states.set('sensor.test_monitored', 'on')
        self.hass.block_till_done()
        state = self.hass.states.get('binary_sensor.test_binary')
        self.assertAlmostEqual(0.2, state.attributes.get('probability'))
        assert state.state == 'off'

    def test_threshold(self):
        """Test sensor on probability threshold limits."""
        # Edge case: threshold exactly 1.0 with a certain observation
        # (prob_given_true 1.0) must still turn the sensor on.
        config = {
            'binary_sensor': {
                'name': 'Test_Binary',
                'platform': 'bayesian',
                'observations': [{
                    'platform': 'state',
                    'entity_id': 'sensor.test_monitored',
                    'to_state': 'on',
                    'prob_given_true': 1.0,
                }],
                'prior': 0.5,
                'probability_threshold': 1.0,
            }
        }
        assert setup_component(self.hass, 'binary_sensor', config)
        self.hass.states.set('sensor.test_monitored', 'on')
        self.hass.block_till_done()
        state = self.hass.states.get('binary_sensor.test_binary')
        self.assertAlmostEqual(1.0, state.attributes.get('probability'))
        assert state.state == 'on'

    def test_multiple_observations(self):
        """Test sensor with multiple observations of same entity."""
        # Two 'state' observations watch the same entity for different
        # target states ('blue' raises probability, 'red' lowers it).
        config = {
            'binary_sensor': {
                'name': 'Test_Binary',
                'platform': 'bayesian',
                'observations': [{
                    'platform': 'state',
                    'entity_id': 'sensor.test_monitored',
                    'to_state': 'blue',
                    'prob_given_true': 0.8,
                    'prob_given_false': 0.4
                }, {
                    'platform': 'state',
                    'entity_id': 'sensor.test_monitored',
                    'to_state': 'red',
                    'prob_given_true': 0.2,
                    'prob_given_false': 0.4
                }],
                'prior': 0.2,
                'probability_threshold': 0.32,
            }
        }
        assert setup_component(self.hass, 'binary_sensor', config)
        # Neither 'blue' nor 'red' -> no observation active.
        self.hass.states.set('sensor.test_monitored', 'off')
        state = self.hass.states.get('binary_sensor.test_binary')
        self.assertEqual([], state.attributes.get('observations'))
        self.assertEqual(0.2, state.attributes.get('probability'))
        assert state.state == 'off'
        # 'blue' activates only the first observation.
        self.hass.states.set('sensor.test_monitored', 'blue')
        self.hass.block_till_done()
        self.hass.states.set('sensor.test_monitored', 'off')
        self.hass.block_till_done()
        self.hass.states.set('sensor.test_monitored', 'blue')
        self.hass.block_till_done()
        state = self.hass.states.get('binary_sensor.test_binary')
        self.assertEqual([{
            'prob_true': 0.8,
            'prob_false': 0.4
        }], state.attributes.get('observations'))
        self.assertAlmostEqual(0.33, state.attributes.get('probability'))
        assert state.state == 'on'
        # 'red' activates only the second one, pushing probability down.
        self.hass.states.set('sensor.test_monitored', 'blue')
        self.hass.block_till_done()
        self.hass.states.set('sensor.test_monitored', 'red')
        self.hass.block_till_done()
        state = self.hass.states.get('binary_sensor.test_binary')
        self.assertAlmostEqual(0.11, state.attributes.get('probability'))
        assert state.state == 'off'

    def test_probability_updates(self):
        """Test probability update function."""
        # Pure-function check of the Bayes update helper: fold a sequence of
        # (P(obs|true), P(obs|false)) pairs into the prior.
        prob_true = [0.3, 0.6, 0.8]
        prob_false = [0.7, 0.4, 0.2]
        prior = 0.5
        for pt, pf in zip(prob_true, prob_false):
            prior = bayesian.update_probability(prior, pt, pf)
        self.assertAlmostEqual(0.720000, prior)
        prob_true = [0.8, 0.3, 0.9]
        prob_false = [0.6, 0.4, 0.2]
        prior = 0.7
        for pt, pf in zip(prob_true, prob_false):
            prior = bayesian.update_probability(prior, pt, pf)
        self.assertAlmostEqual(0.9130434782608695, prior)
| |
from rpython.rtyper.test.test_llinterp import gengraph, interpret
from rpython.rtyper.error import TyperError
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
from rpython.rlib import rgc # Force registration of gc.collect
import gc
import py, sys
def test_collect():
    """gc.collect() must lower to a single argument-less 'gc__collect' op."""
    def fn():
        return gc.collect()
    t, typer, graph = gengraph(fn, [])
    block_ops = list(graph.iterblockops())
    assert len(block_ops) == 1
    _, operation = block_ops[0]
    assert operation.opname == 'gc__collect'
    assert len(operation.args) == 0
    assert interpret(fn, []) is None
def test_collect_0():
    """gc.collect(0) lowers to 'gc__collect' carrying the generation arg."""
    # Dropped the obsolete "sys.version_info < (2, 5)" skip: gc.collect()
    # gained its argument in CPython 2.5 and RPython's toolchain requires a
    # newer Python 2 anyway, so the guard could never trigger.
    def f():
        return gc.collect(0)
    t, typer, graph = gengraph(f, [])
    ops = list(graph.iterblockops())
    assert len(ops) == 1
    op = ops[0][1]
    assert op.opname == 'gc__collect'
    assert len(op.args) == 1
    assert op.args[0].value == 0
    res = interpret(f, [])
    assert res is None
def test_can_move():
    """rgc.can_move lowers to 'gc_can_move' for struct and array mallocs."""
    STRUCT = lltype.GcStruct('T')
    FLOATARR = lltype.GcArray(lltype.Float)
    def fn(flag):
        if flag:
            return rgc.can_move(lltype.malloc(STRUCT))
        return rgc.can_move(lltype.malloc(FLOATARR, 1))
    t, typer, graph = gengraph(fn, [int])
    # One gc_can_move operation per branch.
    can_move_ops = [pair for pair in graph.iterblockops()
                    if pair[1].opname == 'gc_can_move']
    assert len(can_move_ops) == 2
    assert interpret(fn, [1]) == True
def test_ll_arraycopy_1():
    """Copy 3 Signed items from src[4:7] to dst[2:5]; everything else intact."""
    ARR = lltype.GcArray(lltype.Signed)
    src = lltype.malloc(ARR, 10)
    dst = lltype.malloc(ARR, 6)
    for i in range(10):
        src[i] = 100 + i
    for i in range(6):
        dst[i] = 200 + i
    rgc.ll_arraycopy(src, dst, 4, 2, 3)
    # Source must be untouched; destination changed only in the copy window.
    for i in range(10):
        assert src[i] == 100 + i
    for i in range(6):
        if 2 <= i < 5:
            assert dst[i] == src[i + 2]
        else:
            assert dst[i] == 200 + i
def test_ll_arraycopy_2():
    """Copying between arrays of Void items must simply not crash."""
    VOIDARR = lltype.GcArray(lltype.Void)
    src = lltype.malloc(VOIDARR, 10)
    dst = lltype.malloc(VOIDARR, 6)
    rgc.ll_arraycopy(src, dst, 4, 2, 3)
    # nothing to assert here, should not crash...
def test_ll_arraycopy_3():
    """Arraycopy over arrays of raw (non-gc) pointers."""
    RAW = lltype.Struct('S')  # non-gc
    PTRARR = lltype.GcArray(lltype.Ptr(RAW))
    src = lltype.malloc(PTRARR, 10)
    dst = lltype.malloc(PTRARR, 6)
    src_orig = []
    for i in range(10):
        item = lltype.malloc(RAW, immortal=True)
        src[i] = item
        src_orig.append(item)
    dst_orig = []
    for i in range(6):
        item = lltype.malloc(RAW, immortal=True)
        dst[i] = item
        dst_orig.append(item)
    rgc.ll_arraycopy(src, dst, 4, 2, 3)
    for i in range(10):
        assert src[i] == src_orig[i]
    for i in range(6):
        if 2 <= i < 5:
            assert dst[i] == src[i + 2]
        else:
            assert dst[i] == dst_orig[i]
def test_ll_arraycopy_4():
    """Arraycopy over arrays of GC pointers (write-barrier path)."""
    GCSTRUCT = lltype.GcStruct('S')
    PTRARR = lltype.GcArray(lltype.Ptr(GCSTRUCT))
    src = lltype.malloc(PTRARR, 10)
    dst = lltype.malloc(PTRARR, 6)
    src_orig = []
    for i in range(10):
        item = lltype.malloc(GCSTRUCT)
        src[i] = item
        src_orig.append(item)
    dst_orig = []
    for i in range(6):
        item = lltype.malloc(GCSTRUCT)
        dst[i] = item
        dst_orig.append(item)
    rgc.ll_arraycopy(src, dst, 4, 2, 3)
    for i in range(10):
        assert src[i] == src_orig[i]
    for i in range(6):
        if 2 <= i < 5:
            assert dst[i] == src[i + 2]
        else:
            assert dst[i] == dst_orig[i]
def test_ll_arraycopy_5(monkeypatch):
    """A pointer-array copy must be routed through llmemory.raw_memcopy."""
    S = lltype.GcStruct('S')
    PTRARR = lltype.GcArray(lltype.Ptr(S))
    def fn():
        big = lltype.malloc(PTRARR, 10)
        small = lltype.malloc(PTRARR, 6)
        rgc.ll_arraycopy(small, big, 0, 1, 5)
    # Immortal flag struct so the patched hook can record the call.
    CHK = lltype.Struct('CHK', ('called', lltype.Bool))
    flag = lltype.malloc(CHK, immortal=True)
    def recording_raw_memcopy(*args):
        flag.called = True
    monkeypatch.setattr(llmemory, "raw_memcopy", recording_raw_memcopy)
    interpret(fn, [])
    assert flag.called
def test_ll_arraycopy_array_of_structs():
    """Arraycopy over arrays of inlined structs, plus copy_struct_item."""
    TP = lltype.GcArray(lltype.Struct('x', ('x', lltype.Signed),
                                      ('y', lltype.Signed)))
    def fn():
        src = lltype.malloc(TP, 3)
        dst = lltype.malloc(TP, 3)
        for i in range(3):
            src[i].x = 2 * i
            src[i].y = 2 * i + 1
        rgc.ll_arraycopy(src, dst, 0, 0, 3)
        for i in range(3):
            assert dst[i].x == 2 * i
            assert dst[i].y == 2 * i + 1
    interpret(fn, [])
    # Untranslated: copy a single struct item to a different index.
    first = lltype.malloc(TP, 3)
    second = lltype.malloc(TP, 3)
    first[1].x = 3
    first[1].y = 15
    rgc.copy_struct_item(first, second, 1, 2)
    assert second[2].x == 3
    assert second[2].y == 15
def test_ll_arrayclear():
    """ll_arrayclear zeroes every element while preserving the length."""
    ARR = lltype.GcArray(lltype.Signed)
    arr = lltype.malloc(ARR, 10)
    for idx in range(10):
        arr[idx] = 100 + idx
    rgc.ll_arrayclear(arr)
    assert len(arr) == 10
    for idx in range(10):
        assert arr[idx] == 0
def test__contains_gcptr():
    """_contains_gcptr is true iff the type transitively holds a GC pointer."""
    GCPTR = lltype.Ptr(lltype.GcStruct('x'))
    # Table-driven: types with no reachable GC pointer...
    plain_types = [
        lltype.Signed,
        lltype.Struct('x', ('x', lltype.Signed)),
        lltype.Ptr(lltype.Struct('x')),
    ]
    # ...and types that embed one directly or via nesting.
    gc_types = [
        lltype.Struct('x', ('x', lltype.Signed),
                      ('y', lltype.Ptr(lltype.GcArray(lltype.Signed)))),
        lltype.Struct('x', ('x', lltype.Signed),
                      ('y', llmemory.GCREF)),
        lltype.Ptr(lltype.GcStruct('x')),
        lltype.Struct('FOO', ('s', lltype.Struct('BAR', ('y', GCPTR)))),
    ]
    for T in plain_types:
        assert not rgc._contains_gcptr(T)
    for T in gc_types:
        assert rgc._contains_gcptr(T)
def test_ll_arraycopy_small():
    """Exercise ll_arraycopy for every copy length 0..4."""
    ARR = lltype.GcArray(lltype.Signed)
    for length in range(5):
        src = lltype.malloc(ARR, 10)
        dst = lltype.malloc(ARR, 6)
        src_ref = range(20, 30)
        dst_ref = range(50, 56)
        for i in range(len(src)):
            src[i] = src_ref[i]
        for i in range(len(dst)):
            dst[i] = dst_ref[i]
        rgc.ll_arraycopy(src, dst, 4, 2, length)
        # Source untouched; destination changed only within [2, 2+length).
        for i in range(10):
            assert src[i] == src_ref[i]
        for i in range(6):
            if 2 <= i < 2 + length:
                assert dst[i] == src[i + 2]
            else:
                assert dst[i] == dst_ref[i]
def test_ll_shrink_array_1():
    # Placeholder: the generic-shape case of ll_shrink_array is not
    # implemented yet, so this test only records the missing feature.
    py.test.skip("implement ll_shrink_array for GcStructs or GcArrays that "
                 "don't have the shape of STR or UNICODE")
def test_ll_shrink_array_2():
    # Shrinking works for STR/UNICODE-shaped types: a GcStruct whose last
    # field is an inline variable-sized array.
    S = lltype.GcStruct('S', ('x', lltype.Signed),
                             ('vars', lltype.Array(lltype.Signed)))
    s1 = lltype.malloc(S, 5)
    s1.x = 1234
    for i in range(5):
        s1.vars[i] = 50 + i
    s2 = rgc.ll_shrink_array(s1, 3)
    assert lltype.typeOf(s2) == lltype.Ptr(S)
    assert s2.x == 1234              # fixed field is preserved
    assert len(s2.vars) == 3         # array part truncated to new size
    for i in range(3):
        assert s2.vars[i] == 50 + i  # surviving items keep their values
def test_get_referents():
    # get_rpy_referents() must return the objects directly referenced by
    # the given object (here, the instance stored in x1.stuff) and nothing
    # that merely exists elsewhere (x2).
    class X(object):
        __slots__ = ['stuff']
    x1 = X()
    x1.stuff = X()
    x2 = X()
    lst = rgc.get_rpy_referents(rgc.cast_instance_to_gcref(x1))
    lst2 = [rgc.try_cast_gcref_to_instance(X, x) for x in lst]
    assert x1.stuff in lst2
    assert x2 not in lst2
def test_get_memory_usage():
    """get_rpy_memory_usage() reports a plausible byte count for a plain
    instance (at least one word, at most a handful of words)."""
    class X(object):
        pass
    x1 = X()
    usage = rgc.get_rpy_memory_usage(rgc.cast_instance_to_gcref(x1))
    assert 8 <= usage <= 64
def test_register_custom_trace_hook():
    TP = lltype.GcStruct('X')
    def trace_func():
        xxx # should not be annotated here
    lambda_trace_func = lambda: trace_func
    def f():
        rgc.register_custom_trace_hook(TP, lambda_trace_func)
    t, typer, graph = gengraph(f, [])
    # The hook is only recorded on the rtyper; trace_func itself must not
    # be annotated/translated (the undefined 'xxx' above would blow up).
    assert typer.custom_trace_funcs == [(TP, trace_func)]
def test_nonmoving_raw_ptr_for_resizable_list():
    # A resizable list can hand out a raw CCHARP to its items; the pointer
    # must stay valid across a collection, and mutations through either
    # side must be visible through the other.  Checked in three modes:
    # direct, llinterp, and fully compiled with incminimark.
    def f(n):
        lst = ['a', 'b', 'c']
        lst = rgc.resizable_list_supporting_raw_ptr(lst)
        lst.append(chr(n))
        assert lst[3] == chr(n)
        assert lst[-1] == chr(n)
        #
        ptr = rgc.nonmoving_raw_ptr_for_resizable_list(lst)
        assert lst[:] == ['a', 'b', 'c', chr(n)]
        assert lltype.typeOf(ptr) == rffi.CCHARP
        assert [ptr[i] for i in range(4)] == ['a', 'b', 'c', chr(n)]
        #
        # Writes through the list are visible through the raw pointer...
        lst[-3] = 'X'
        assert ptr[1] == 'X'
        # ...and vice versa.
        ptr[2] = 'Y'
        assert lst[-2] == 'Y'
        #
        # Round-trip the pointer through a plain integer, then force a
        # collection: the items array must not have moved.
        addr = rffi.cast(lltype.Signed, ptr)
        ptr = rffi.cast(rffi.CCHARP, addr)
        rgc.collect()    # should not move lst.items
        lst[-4] = 'g'
        assert ptr[0] == 'g'
        ptr[3] = 'H'
        assert lst[-1] == 'H'
        return lst
    #
    # direct untranslated run
    lst = f(35)
    assert isinstance(lst, rgc._ResizableListSupportingRawPtr)
    #
    # llinterp run
    interpret(f, [35])
    #
    # compilation with the GC transformer
    import subprocess
    from rpython.translator.interactive import Translation
    #
    def main(argv):
        f(len(argv))
        print "OK!"
        return 0
    #
    t = Translation(main, gc="incminimark")
    t.disable(['backendopt'])
    t.set_backend_extra_options(c_debug_defines=True)
    exename = t.compile()
    data = subprocess.check_output([str(exename), '.', '.', '.'])
    assert data.strip().endswith('OK!')
def test_nonmoving_raw_ptr_for_resizable_list_getslice():
    # Same as above, but the list is obtained by slicing: a slice of a
    # raw-ptr-supporting list must itself support raw pointers.
    def f(n):
        lst = ['a', 'b', 'c', 'd', 'e']
        lst = rgc.resizable_list_supporting_raw_ptr(lst)
        lst = lst[:3]
        lst.append(chr(n))
        assert lst[3] == chr(n)
        assert lst[-1] == chr(n)
        #
        ptr = rgc.nonmoving_raw_ptr_for_resizable_list(lst)
        assert lst[:] == ['a', 'b', 'c', chr(n)]
        assert lltype.typeOf(ptr) == rffi.CCHARP
        assert [ptr[i] for i in range(4)] == ['a', 'b', 'c', chr(n)]
        return lst
    #
    # direct untranslated run
    lst = f(35)
    assert isinstance(lst, rgc._ResizableListSupportingRawPtr)
    #
    # llinterp run
    interpret(f, [35])
def test_ll_for_resizable_list():
    # ll_for_resizable_list() exposes the low-level LIST structure itself
    # (length + items array), not just a raw pointer to the items.
    def f(n):
        lst = ['a', 'b', 'c']
        lst = rgc.resizable_list_supporting_raw_ptr(lst)
        lst.append(chr(n))
        assert lst[3] == chr(n)
        assert lst[-1] == chr(n)
        #
        ll_list = rgc.ll_for_resizable_list(lst)
        assert lst[:] == ['a', 'b', 'c', chr(n)]
        assert ll_list.length == 4
        assert [ll_list.items[i] for i in range(4)] == ['a', 'b', 'c', chr(n)]
        #
        # Mutations are visible in both directions.
        lst[-3] = 'X'
        assert ll_list.items[1] == 'X'
        ll_list.items[2] = 'Y'
        assert lst[-2] == 'Y'
        #
        return lst
    #
    # direct untranslated run
    lst = f(35)
    assert isinstance(lst, rgc._ResizableListSupportingRawPtr)
    #
    # llinterp run
    interpret(f, [35])
    #
    # compilation with the GC transformer
    import subprocess
    from rpython.translator.interactive import Translation
    #
    def main(argv):
        f(len(argv))
        print "OK!"
        return 0
    #
    t = Translation(main, gc="incminimark")
    t.disable(['backendopt'])
    t.set_backend_extra_options(c_debug_defines=True)
    exename = t.compile()
    data = subprocess.check_output([str(exename), '.', '.', '.'])
    assert data.strip().endswith('OK!')
def test_ListSupportingRawPtr_direct():
    # Exhaustively exercise _ResizableListSupportingRawPtr untranslated:
    # every non-resizing list operation must work both before and after a
    # raw pointer has been taken, and every resizing operation must detach
    # the low-level list again (lst._ll_list becomes None).
    lst = ['a', 'b', 'c']
    lst = rgc.resizable_list_supporting_raw_ptr(lst)
    def check_nonresizing():
        # NOTE: 'p' is rebound by the caller before each invocation —
        # first the list itself, then the raw pointer obtained from it.
        assert lst[1] == lst[-2] == 'b'
        lst[1] = 'X'
        assert lst[1] == 'X'
        lst[-1] = 'Y'
        assert lst[1:3] == ['X', 'Y']
        assert lst[-2:9] == ['X', 'Y']
        lst[1:2] = 'B'
        assert lst[:] == ['a', 'B', 'Y']
        assert list(iter(lst)) == ['a', 'B', 'Y']
        assert list(reversed(lst)) == ['Y', 'B', 'a']
        assert 'B' in lst
        assert 'b' not in lst
        assert p[0] == 'a'
        assert p[1] == 'B'
        assert p[2] == 'Y'
        assert lst + ['*'] == ['a', 'B', 'Y', '*']
        assert ['*'] + lst == ['*', 'a', 'B', 'Y']
        assert lst + lst == ['a', 'B', 'Y', 'a', 'B', 'Y']
        base = ['8']
        base += lst
        assert base == ['8', 'a', 'B', 'Y']
        assert lst == ['a', 'B', 'Y']
        assert ['a', 'B', 'Y'] == lst
        assert ['a', 'B', 'Z'] != lst
        assert ['a', 'B', 'Z'] > lst
        assert ['a', 'B', 'Z'] >= lst
        assert lst * 2 == ['a', 'B', 'Y', 'a', 'B', 'Y']
        assert 2 * lst == ['a', 'B', 'Y', 'a', 'B', 'Y']
        assert lst.count('B') == 1
        assert lst.index('Y') == 2
        lst.reverse()
        assert lst == ['Y', 'B', 'a']
        lst.sort()
        assert lst == ['B', 'Y', 'a']
        lst.sort(reverse=True)
        assert lst == ['a', 'Y', 'B']
        # Restore the original contents for the next pass.
        lst[1] = 'b'
        lst[2] = 'c'
        assert list(lst) == ['a', 'b', 'c']
    # First pass: no raw pointer has been taken yet.
    p = lst
    check_nonresizing()
    assert lst._ll_list is None
    # Second pass: with the low-level list attached; list contents and
    # ll_list.items must stay in sync.
    p = lst._nonmoving_raw_ptr_for_resizable_list()
    ll_list = rgc.ll_for_resizable_list(lst)
    assert ll_list is lst._ll_list
    check_nonresizing()
    assert lst._ll_list == ll_list
    assert p[0] == ll_list.items[0] == 'a'
    assert p[1] == ll_list.items[1] == 'b'
    assert p[2] == ll_list.items[2] == 'c'
    def do_resizing_operation():
        # Each operation mutates the freshly reset 'lst' (a closure over
        # the enclosing function's variable) and yields the expected
        # contents; the caller checks the result, checks that the
        # low-level list got detached, and resets 'lst'.
        del lst[1]
        yield ['a', 'c']
        lst[:2] = ['X']
        yield ['X', 'c']
        del lst[:2]
        yield ['c']
        x = lst
        x += ['t']
        yield ['a', 'b', 'c', 't']
        x = lst
        x *= 3
        yield ['a', 'b', 'c'] * 3
        lst.append('f')
        yield ['a', 'b', 'c', 'f']
        lst.extend('fg')
        yield ['a', 'b', 'c', 'f', 'g']
        lst.insert(1, 'k')
        yield ['a', 'k', 'b', 'c']
        n = lst.pop(1)
        assert n == 'b'
        yield ['a', 'c']
        lst.remove('c')
        yield ['a', 'b']
        # Sanity check on the final reset performed by the caller.
        assert lst == ['a', 'b', 'c']
    for expect in do_resizing_operation():
        assert lst == expect
        assert lst._ll_list is None
        lst = ['a', 'b', 'c']
        lst = rgc.resizable_list_supporting_raw_ptr(lst)
        lst._nonmoving_raw_ptr_for_resizable_list()
# ____________________________________________________________
class T_Root(object):
    # Base class used as the 'Class' attribute of SimpleFQ below.
    pass
class T_Int(T_Root):
    # Trivial integer wrapper, used to identify objects in the tests.
    def __init__(self, x):
        self.x = x
class SimpleFQ(rgc.FinalizerQueue):
    # FinalizerQueue whose trigger merely counts invocations instead of
    # draining the queue; the tests drain it explicitly via next_dead().
    Class = T_Root
    _triggered = 0
    def finalizer_trigger(self):
        self._triggered += 1
class TestFinalizerQueue:
    # Untranslated behavior of rgc.FinalizerQueue: the trigger fires once
    # per collection that finds dead registered objects, each dead object
    # comes out of next_dead() exactly once, and a __del__ method only
    # runs after the object has been drained from the queue and dropped.

    def test_simple(self):
        fq = SimpleFQ()
        assert fq.next_dead() is None
        assert fq._triggered == 0
        w = T_Int(67)
        fq.register_finalizer(w)
        #
        # While 'w' is alive, collecting must not trigger anything.
        gc.collect()
        assert fq._triggered == 0
        assert fq.next_dead() is None
        #
        del w
        gc.collect()
        # Now the object is dead: the trigger fired once and the object
        # is returned (exactly once) by next_dead().
        assert fq._triggered == 1
        n = fq.next_dead()
        assert type(n) is T_Int and n.x == 67
        #
        gc.collect()
        assert fq._triggered == 1
        assert fq.next_dead() is None

    def test_del_1(self):
        # Object with both a __del__ and a registered finalizer: __del__
        # only runs after the object was fetched from the queue and
        # dropped again.
        deleted = {}
        class T_Del(T_Int):
            def __del__(self):
                deleted[self.x] = deleted.get(self.x, 0) + 1
        fq = SimpleFQ()
        fq.register_finalizer(T_Del(42))
        gc.collect(); gc.collect()
        assert deleted == {}
        assert fq._triggered == 1
        n = fq.next_dead()
        assert type(n) is T_Del and n.x == 42
        assert deleted == {}
        del n
        gc.collect()
        assert fq.next_dead() is None
        assert deleted == {42: 1}
        assert fq._triggered == 1

    def test_del_2(self):
        # Two objects with (overriding) __del__ methods registered on the
        # same queue; each __del__ must run exactly once, after draining.
        deleted = {}
        class T_Del1(T_Int):
            def __del__(self):
                deleted[1, self.x] = deleted.get((1, self.x), 0) + 1
        class T_Del2(T_Del1):
            def __del__(self):
                deleted[2, self.x] = deleted.get((2, self.x), 0) + 1
                T_Del1.__del__(self)
        fq = SimpleFQ()
        w = T_Del2(42)
        fq.register_finalizer(w)
        del w
        fq.register_finalizer(T_Del1(21))
        gc.collect(); gc.collect()
        assert deleted == {}
        assert fq._triggered == 2
        a = fq.next_dead()
        b = fq.next_dead()
        # The two dead objects may come back in either order.
        if a.x == 21:
            a, b = b, a
        assert type(a) is T_Del2 and a.x == 42
        assert type(b) is T_Del1 and b.x == 21
        assert deleted == {}
        del a, b
        gc.collect()
        assert fq.next_dead() is None
        assert deleted == {(1, 42): 1, (2, 42): 1, (1, 21): 1}
        assert fq._triggered == 2

    def test_del_3(self):
        # A subclass that merely inherits __del__ behaves the same way.
        deleted = {}
        class T_Del1(T_Int):
            def __del__(self):
                deleted[1, self.x] = deleted.get((1, self.x), 0) + 1
        class T_Del2(T_Del1):
            pass
        fq = SimpleFQ()
        fq.register_finalizer(T_Del2(42))
        gc.collect(); gc.collect()
        assert deleted == {}
        assert fq._triggered == 1
        a = fq.next_dead()
        assert type(a) is T_Del2 and a.x == 42
        assert deleted == {}
        del a
        gc.collect()
        assert fq.next_dead() is None
        assert deleted == {(1, 42): 1}
        assert fq._triggered == 1

    def test_finalizer_trigger_calls_too_much(self):
        external_func = rffi.llexternal("foo", [], lltype.Void)
        # ^^^ with release_gil=True
        class X(object):
            pass
        class FQ(rgc.FinalizerQueue):
            Class = X
            def finalizer_trigger(self):
                external_func()
        fq = FQ()
        def f():
            x = X()
            fq.register_finalizer(x)
        # Calling a release-GIL external function from a finalizer trigger
        # is forbidden; the rtyper must reject the program.
        e = py.test.raises(TyperError, gengraph, f, [])
        assert str(e.value).startswith('the RPython-level __del__() method in')
| |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Python wrappers for the Google Storage RESTful API."""
__all__ = ['ReadBuffer',
'StreamingBuffer',
]
import collections
import os
import urlparse
from cloudstorage import common, api_utils
from . import errors
from . import rest_api
try:
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
except ImportError:
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
def _get_storage_api(retry_params, account_id=None):
  """Build the _StorageApi instance used by the public API methods.

  Args:
    retry_params: An instance of api_utils.RetryParams. If none,
      thread's default will be used.
    account_id: Internal-use only.

  Returns:
    A storage_api instance to handle urlfetch work to GCS.
    On dev appserver, this instance by default will talk to a local stub
    unless common.ACCESS_TOKEN is set. That token will be used to talk
    to the real GCS.
  """
  storage = _StorageApi(_StorageApi.full_control_scope,
                        service_account_id=account_id,
                        retry_params=retry_params)
  # On the local dev server without an explicit token, redirect calls to
  # the local stub; with a token, always talk to the real service.
  if common.local_run() and not common.get_access_token():
    storage.api_url = common.local_api_url()
  if common.get_access_token():
    storage.token = common.get_access_token()
  return storage
class _StorageApi(rest_api._RestApi):
  """A simple wrapper for the Google Storage RESTful API.

  WARNING: Do NOT directly use this api. It's an implementation detail
  and is subject to change at any release.

  All async methods have similar args and returns.

  Args:
    path: The path to the Google Storage object or bucket, e.g.
      '/mybucket/myfile' or '/mybucket'.
    **kwd: Options for urlfetch. e.g.
      headers={'content-type': 'text/plain'}, payload='blah'.

  Returns:
    A ndb Future. When fulfilled, future.get_result() should return
    a tuple of (status, headers, content) that represents a HTTP response
    of Google Cloud Storage XML API.
  """

  api_url = 'https://storage.googleapis.com'
  read_only_scope = 'https://www.googleapis.com/auth/devstorage.read_only'
  read_write_scope = 'https://www.googleapis.com/auth/devstorage.read_write'
  full_control_scope = 'https://www.googleapis.com/auth/devstorage.full_control'

  def __getstate__(self):
    """Store state as part of serialization/pickling.

    Returns:
      A tuple (of dictionaries) with the state of this object
    """
    # api_url is a class attribute that may have been overridden on the
    # instance (e.g. by _get_storage_api for local runs), so persist it.
    return (super(_StorageApi, self).__getstate__(), {'api_url': self.api_url})

  def __setstate__(self, state):
    """Restore state as part of deserialization/unpickling.

    Args:
      state: the tuple from a __getstate__ call
    """
    superstate, localstate = state
    super(_StorageApi, self).__setstate__(superstate)
    self.api_url = localstate['api_url']

  @api_utils._eager_tasklet
  @ndb.tasklet
  def do_request_async(self, url, method='GET', headers=None, payload=None,
                       deadline=None, callback=None):
    """Inherit docs.

    This method translates urlfetch exceptions to more service specific ones.
    """
    if headers is None:
      headers = {}
    if 'x-goog-api-version' not in headers:
      headers['x-goog-api-version'] = '2'
    headers['accept-encoding'] = 'gzip, *'
    try:
      resp_tuple = yield super(_StorageApi, self).do_request_async(
          url, method=method, headers=headers, payload=payload,
          deadline=deadline, callback=callback)
    except urlfetch.DownloadError, e:
      # urlfetch timeouts/aborts surface as DownloadError; re-raise them
      # as the service-level TimeoutError.  (Python 2 except syntax.)
      raise errors.TimeoutError(
          'Request to Google Cloud Storage timed out.', e)

    raise ndb.Return(resp_tuple)

  def post_object_async(self, path, **kwds):
    """POST to an object."""
    return self.do_request_async(self.api_url + path, 'POST', **kwds)

  def put_object_async(self, path, **kwds):
    """PUT an object."""
    return self.do_request_async(self.api_url + path, 'PUT', **kwds)

  def get_object_async(self, path, **kwds):
    """GET an object.

    Note: No payload argument is supported.
    """
    return self.do_request_async(self.api_url + path, 'GET', **kwds)

  def delete_object_async(self, path, **kwds):
    """DELETE an object.

    Note: No payload argument is supported.
    """
    return self.do_request_async(self.api_url + path, 'DELETE', **kwds)

  def head_object_async(self, path, **kwds):
    """HEAD an object.

    Depending on request headers, HEAD returns various object properties,
    e.g. Content-Length, Last-Modified, and ETag.

    Note: No payload argument is supported.
    """
    return self.do_request_async(self.api_url + path, 'HEAD', **kwds)

  def get_bucket_async(self, path, **kwds):
    """GET a bucket."""
    return self.do_request_async(self.api_url + path, 'GET', **kwds)
# Wrap the class so every *_async method also gets a synchronous
# counterpart (head_object, post_object, put_object, ... used below).
_StorageApi = rest_api.add_sync_methods(_StorageApi)
class ReadBuffer(object):
  """A class for reading Google storage files."""

  DEFAULT_BUFFER_SIZE = 1024 * 1024            # 1 MB of read-ahead.
  MAX_REQUEST_SIZE = 30 * DEFAULT_BUFFER_SIZE  # cap for one urlfetch request.

  def __init__(self,
               api,
               path,
               buffer_size=DEFAULT_BUFFER_SIZE,
               max_request_size=MAX_REQUEST_SIZE):
    """Constructor.

    Args:
      api: A StorageApi instance.
      path: Quoted/escaped path to the object, e.g. /mybucket/myfile
      buffer_size: buffer size. The ReadBuffer keeps
        one buffer. But there may be a pending future that contains
        a second buffer. This size must be less than max_request_size.
      max_request_size: Max bytes to request in one urlfetch.
    """
    self._api = api
    self._path = path
    self.name = api_utils._unquote_filename(path)
    self.closed = False
    assert buffer_size <= max_request_size
    self._buffer_size = buffer_size
    self._max_request_size = max_request_size
    self._offset = 0
    self._buffer = _Buffer()
    self._etag = None
    # Fire the first GET now so it overlaps with the HEAD below; response
    # checking is deferred (check_response=False) until the file size is
    # known, and the returned closure is invoked further down.
    get_future = self._get_segment(0, self._buffer_size, check_response=False)
    status, headers, content = self._api.head_object(path)
    errors.check_status(status, [200], path, resp_headers=headers, body=content)
    self._file_size = long(common.get_stored_content_length(headers))
    self._check_etag(headers.get('etag'))
    self._buffer_future = None
    if self._file_size != 0:
      content, check_response_closure = get_future.get_result()
      check_response_closure()
      self._buffer.reset(content)
      self._request_next_buffer()

  def __getstate__(self):
    """Store state as part of serialization/pickling.

    The contents of the read buffer are not stored, only the current offset for
    data read by the client. A new read buffer is established at unpickling.
    The head information for the object (file size and etag) are stored to
    reduce startup and ensure the file has not changed.

    Returns:
      A dictionary with the state of this object
    """
    return {'api': self._api,
            'path': self._path,
            'buffer_size': self._buffer_size,
            'request_size': self._max_request_size,
            'etag': self._etag,
            'size': self._file_size,
            'offset': self._offset,
            'closed': self.closed}

  def __setstate__(self, state):
    """Restore state as part of deserialization/unpickling.

    Args:
      state: the dictionary from a __getstate__ call

    Along with restoring the state, pre-fetch the next read buffer.
    """
    self._api = state['api']
    self._path = state['path']
    self.name = api_utils._unquote_filename(self._path)
    self._buffer_size = state['buffer_size']
    self._max_request_size = state['request_size']
    self._etag = state['etag']
    self._file_size = state['size']
    self._offset = state['offset']
    self._buffer = _Buffer()
    self.closed = state['closed']
    self._buffer_future = None
    if self._remaining() and not self.closed:
      self._request_next_buffer()

  def __iter__(self):
    """Iterator interface.

    Note the ReadBuffer container itself is the iterator. It's
    (quote PEP0234)
    'destructive: they consumes all the values and a second iterator
    cannot easily be created that iterates independently over the same values.
    You could open the file for the second time, or seek() to the beginning.'

    Returns:
      Self.
    """
    return self

  def next(self):
    # Python 2 iterator protocol: one line per iteration.
    line = self.readline()
    if not line:
      raise StopIteration()
    return line

  def readline(self, size=-1):
    """Read one line delimited by '\n' from the file.

    A trailing newline character is kept in the string. It may be absent when a
    file ends with an incomplete line. If the size argument is non-negative,
    it specifies the maximum string size (counting the newline) to return.
    A negative size is the same as unspecified. Empty string is returned
    only when EOF is encountered immediately.

    Args:
      size: Maximum number of bytes to read. If not specified, readline stops
        only on '\n' or EOF.

    Returns:
      The data read as a string.

    Raises:
      IOError: When this buffer is closed.
    """
    self._check_open()
    if size == 0 or not self._remaining():
      return ''
    data_list = []
    newline_offset = self._buffer.find_newline(size)
    while newline_offset < 0:
      # No newline within the allowed window of the current buffer:
      # consume the buffer and pull in the next prefetched one.
      data = self._buffer.read(size)
      size -= len(data)
      self._offset += len(data)
      data_list.append(data)
      if size == 0 or not self._remaining():
        return ''.join(data_list)
      self._buffer.reset(self._buffer_future.get_result())
      self._request_next_buffer()
      newline_offset = self._buffer.find_newline(size)
    data = self._buffer.read_to_offset(newline_offset + 1)
    self._offset += len(data)
    data_list.append(data)
    return ''.join(data_list)

  def read(self, size=-1):
    """Read data from RAW file.

    Args:
      size: Number of bytes to read as integer. Actual number of bytes
        read is always equal to size unless EOF is reached. If size is
        negative or unspecified, read the entire file.

    Returns:
      data read as str.

    Raises:
      IOError: When this buffer is closed.
    """
    self._check_open()
    if not self._remaining():
      return ''
    data_list = []
    while True:
      remaining = self._buffer.remaining()
      if size >= 0 and size < remaining:
        # The current buffer fully satisfies the request.
        data_list.append(self._buffer.read(size))
        self._offset += size
        break
      else:
        # Drain the current buffer, then either fetch the rest directly
        # (no prefetch pending) or continue with the prefetched buffer.
        size -= remaining
        self._offset += remaining
        data_list.append(self._buffer.read())
        if self._buffer_future is None:
          if size < 0 or size >= self._remaining():
            needs = self._remaining()
          else:
            needs = size
          data_list.extend(self._get_segments(self._offset, needs))
          self._offset += needs
          break
        if self._buffer_future:
          self._buffer.reset(self._buffer_future.get_result())
          self._buffer_future = None
    if self._buffer_future is None:
      self._request_next_buffer()
    return ''.join(data_list)

  def _remaining(self):
    # Bytes between the current offset and EOF.
    return self._file_size - self._offset

  def _request_next_buffer(self):
    """Request next buffer.

    Requires self._offset and self._buffer are in consistent state.
    """
    self._buffer_future = None
    next_offset = self._offset + self._buffer.remaining()
    if next_offset != self._file_size:
      self._buffer_future = self._get_segment(next_offset,
                                              self._buffer_size)

  def _get_segments(self, start, request_size):
    """Get segments of the file from Google Storage as a list.

    A large request is broken into segments to avoid hitting urlfetch
    response size limit. Each segment is returned from a separate urlfetch.

    Args:
      start: start offset to request. Inclusive. Have to be within the
        range of the file.
      request_size: number of bytes to request.

    Returns:
      A list of file segments in order
    """
    if not request_size:
      return []
    end = start + request_size
    futures = []
    # Issue all the segment fetches first, then collect the results, so
    # the requests run concurrently.
    while request_size > self._max_request_size:
      futures.append(self._get_segment(start, self._max_request_size))
      request_size -= self._max_request_size
      start += self._max_request_size
    if start < end:
      futures.append(self._get_segment(start, end-start))
    return [fut.get_result() for fut in futures]

  @ndb.tasklet
  def _get_segment(self, start, request_size, check_response=True):
    """Get a segment of the file from Google Storage.

    Args:
      start: start offset of the segment. Inclusive. Have to be within the
        range of the file.
      request_size: number of bytes to request. Have to be small enough
        for a single urlfetch request. May go over the logical range of the
        file.
      check_response: True to check the validity of GCS response automatically
        before the future returns. False otherwise. See Yields section.

    Yields:
      If check_response is True, the segment [start, start + request_size)
      of the file.
      Otherwise, a tuple. The first element is the unverified file segment.
      The second element is a closure that checks response. Caller should
      first invoke the closure before consuming the file segment.

    Raises:
      ValueError: if the file has changed while reading.
    """
    end = start + request_size - 1
    content_range = '%d-%d' % (start, end)
    headers = {'Range': 'bytes=' + content_range}
    status, resp_headers, content = yield self._api.get_object_async(
        self._path, headers=headers)
    def _checker():
      # 200 happens when the whole object fits in one range request.
      errors.check_status(status, [200, 206], self._path, headers,
                          resp_headers, body=content)
      self._check_etag(resp_headers.get('etag'))
    if check_response:
      _checker()
      raise ndb.Return(content)
    raise ndb.Return(content, _checker)

  def _check_etag(self, etag):
    """Check if etag is the same across requests to GCS.

    If self._etag is None, set it. If etag is set, check that the new
    etag equals the old one.

    In the __init__ method, we fire one HEAD and one GET request using
    ndb tasklet. One of them would return first and set the first value.

    Args:
      etag: etag from a GCS HTTP response. None if etag is not part of the
        response header. It could be None for example in the case of GCS
        composite file.

    Raises:
      ValueError: if two etags are not equal.
    """
    if etag is None:
      return
    elif self._etag is None:
      self._etag = etag
    elif self._etag != etag:
      raise ValueError('File on GCS has changed while reading.')

  def close(self):
    # Drop buffers so their memory can be reclaimed; further reads raise.
    self.closed = True
    self._buffer = None
    self._buffer_future = None

  def __enter__(self):
    return self

  def __exit__(self, atype, value, traceback):
    self.close()
    return False

  def seek(self, offset, whence=os.SEEK_SET):
    """Set the file's current offset.

    Note if the new offset is out of bound, it is adjusted to either 0 or EOF.

    Args:
      offset: seek offset as number.
      whence: seek mode. Supported modes are os.SEEK_SET (absolute seek),
        os.SEEK_CUR (seek relative to the current position), and os.SEEK_END
        (seek relative to the end, offset should be negative).

    Raises:
      IOError: When this buffer is closed.
      ValueError: When whence is invalid.
    """
    self._check_open()
    # Any buffered/prefetched data is for the old position; discard it.
    self._buffer.reset()
    self._buffer_future = None
    if whence == os.SEEK_SET:
      self._offset = offset
    elif whence == os.SEEK_CUR:
      self._offset += offset
    elif whence == os.SEEK_END:
      self._offset = self._file_size + offset
    else:
      raise ValueError('Whence mode %s is invalid.' % str(whence))
    # Clamp to [0, file_size].
    self._offset = min(self._offset, self._file_size)
    self._offset = max(self._offset, 0)
    if self._remaining():
      self._request_next_buffer()

  def tell(self):
    """Tell the file's current offset.

    Returns:
      current offset in reading this file.

    Raises:
      IOError: When this buffer is closed.
    """
    self._check_open()
    return self._offset

  def _check_open(self):
    if self.closed:
      raise IOError('Buffer is closed.')

  def seekable(self):
    return True

  def readable(self):
    return True

  def writable(self):
    return False
class _Buffer(object):
"""In memory buffer."""
def __init__(self):
self.reset()
def reset(self, content='', offset=0):
self._buffer = content
self._offset = offset
def read(self, size=-1):
"""Returns bytes from self._buffer and update related offsets.
Args:
size: number of bytes to read starting from current offset.
Read the entire buffer if negative.
Returns:
Requested bytes from buffer.
"""
if size < 0:
offset = len(self._buffer)
else:
offset = self._offset + size
return self.read_to_offset(offset)
def read_to_offset(self, offset):
"""Returns bytes from self._buffer and update related offsets.
Args:
offset: read from current offset to this offset, exclusive.
Returns:
Requested bytes from buffer.
"""
assert offset >= self._offset
result = self._buffer[self._offset: offset]
self._offset += len(result)
return result
def remaining(self):
return len(self._buffer) - self._offset
def find_newline(self, size=-1):
"""Search for newline char in buffer starting from current offset.
Args:
size: number of bytes to search. -1 means all.
Returns:
offset of newline char in buffer. -1 if doesn't exist.
"""
if size < 0:
return self._buffer.find('\n', self._offset)
return self._buffer.find('\n', self._offset, self._offset + size)
class StreamingBuffer(object):
"""A class for creating large objects using the 'resumable' API.
The API is a subset of the Python writable stream API sufficient to
support writing zip files using the zipfile module.
The exact sequence of calls and use of headers is documented at
https://developers.google.com/storage/docs/developer-guide#unknownresumables
"""
_blocksize = 256 * 1024
_flushsize = 8 * _blocksize
_maxrequestsize = 9 * 4 * _blocksize
def __init__(self,
api,
path,
content_type=None,
gcs_headers=None):
"""Constructor.
Args:
api: A StorageApi instance.
path: Quoted/escaped path to the object, e.g. /mybucket/myfile
content_type: Optional content-type; Default value is
delegate to Google Cloud Storage.
gcs_headers: additional gs headers as a str->str dict, e.g
{'x-goog-acl': 'private', 'x-goog-meta-foo': 'foo'}.
Raises:
IOError: When this location can not be found.
"""
assert self._maxrequestsize > self._blocksize
assert self._maxrequestsize % self._blocksize == 0
assert self._maxrequestsize >= self._flushsize
self._api = api
self._path = path
self.name = api_utils._unquote_filename(path)
self.closed = False
self._buffer = collections.deque()
self._buffered = 0
self._written = 0
self._offset = 0
headers = {'x-goog-resumable': 'start'}
if content_type:
headers['content-type'] = content_type
if gcs_headers:
headers.update(gcs_headers)
status, resp_headers, content = self._api.post_object(path, headers=headers)
errors.check_status(status, [201], path, headers, resp_headers,
body=content)
loc = resp_headers.get('location')
if not loc:
raise IOError('No location header found in 201 response')
parsed = urlparse.urlparse(loc)
self._path_with_token = '%s?%s' % (self._path, parsed.query)
def __getstate__(self):
"""Store state as part of serialization/pickling.
The contents of the write buffer are stored. Writes to the underlying
storage are required to be on block boundaries (_blocksize) except for the
last write. In the worst case the pickled version of this object may be
slightly larger than the blocksize.
Returns:
A dictionary with the state of this object
"""
return {'api': self._api,
'path': self._path,
'path_token': self._path_with_token,
'buffer': self._buffer,
'buffered': self._buffered,
'written': self._written,
'offset': self._offset,
'closed': self.closed}
def __setstate__(self, state):
"""Restore state as part of deserialization/unpickling.
Args:
state: the dictionary from a __getstate__ call
"""
self._api = state['api']
self._path_with_token = state['path_token']
self._buffer = state['buffer']
self._buffered = state['buffered']
self._written = state['written']
self._offset = state['offset']
self.closed = state['closed']
self._path = state['path']
self.name = api_utils._unquote_filename(self._path)
def write(self, data):
"""Write some bytes.
Args:
data: data to write. str.
Raises:
TypeError: if data is not of type str.
"""
self._check_open()
if not isinstance(data, str):
raise TypeError('Expected str but got %s.' % type(data))
if not data:
return
self._buffer.append(data)
self._buffered += len(data)
self._offset += len(data)
if self._buffered >= self._flushsize:
self._flush()
def flush(self):
"""Flush as much as possible to GCS.
GCS *requires* that all writes except for the final one align on
256KB boundaries. So the internal buffer may still have < 256KB bytes left
after flush.
"""
self._check_open()
self._flush(finish=False)
def tell(self):
"""Return the total number of bytes passed to write() so far.
(There is no seek() method.)
"""
return self._offset
def close(self):
"""Flush the buffer and finalize the file.
When this returns the new file is available for reading.
"""
if not self.closed:
self.closed = True
self._flush(finish=True)
self._buffer = None
def __enter__(self):
return self
def __exit__(self, atype, value, traceback):
self.close()
return False
def _flush(self, finish=False):
"""Internal API to flush.
Buffer is flushed to GCS only when the total amount of buffered data is at
least self._blocksize, or to flush the final (incomplete) block of
the file with finish=True.
"""
while ((finish and self._buffered >= 0) or
(not finish and self._buffered >= self._blocksize)):
tmp_buffer = []
tmp_buffer_len = 0
excess = 0
while self._buffer:
buf = self._buffer.popleft()
size = len(buf)
self._buffered -= size
tmp_buffer.append(buf)
tmp_buffer_len += size
if tmp_buffer_len >= self._maxrequestsize:
excess = tmp_buffer_len - self._maxrequestsize
break
if not finish and (
tmp_buffer_len % self._blocksize + self._buffered <
self._blocksize):
excess = tmp_buffer_len % self._blocksize
break
if excess:
over = tmp_buffer.pop()
size = len(over)
assert size >= excess
tmp_buffer_len -= size
head, tail = over[:-excess], over[-excess:]
self._buffer.appendleft(tail)
self._buffered += len(tail)
if head:
tmp_buffer.append(head)
tmp_buffer_len += len(head)
data = ''.join(tmp_buffer)
file_len = '*'
if finish and not self._buffered:
file_len = self._written + len(data)
self._send_data(data, self._written, file_len)
self._written += len(data)
if file_len != '*':
break
def _send_data(self, data, start_offset, file_len):
"""Send the block to the storage service.
This is a utility method that does not modify self.
Args:
data: data to send in str.
start_offset: start offset of the data in relation to the file.
file_len: an int if this is the last data to append to the file.
Otherwise '*'.
"""
headers = {}
end_offset = start_offset + len(data) - 1
if data:
headers['content-range'] = ('bytes %d-%d/%s' %
(start_offset, end_offset, file_len))
else:
headers['content-range'] = ('bytes */%s' % file_len)
status, response_headers, content = self._api.put_object(
self._path_with_token, payload=data, headers=headers)
if file_len == '*':
expected = 308
else:
expected = 200
errors.check_status(status, [expected], self._path, headers,
response_headers, content,
{'upload_path': self._path_with_token})
def _get_offset_from_gcs(self):
"""Get the last offset that has been written to GCS.
This is a utility method that does not modify self.
Returns:
an int of the last offset written to GCS by this upload, inclusive.
-1 means nothing has been written.
"""
headers = {'content-range': 'bytes */*'}
status, response_headers, content = self._api.put_object(
self._path_with_token, headers=headers)
errors.check_status(status, [308], self._path, headers,
response_headers, content,
{'upload_path': self._path_with_token})
val = response_headers.get('range')
if val is None:
return -1
_, offset = val.rsplit('-', 1)
return int(offset)
def _force_close(self, file_length=None):
"""Close this buffer on file_length.
Finalize this upload immediately on file_length.
Contents that are still in memory will not be uploaded.
This is a utility method that does not modify self.
Args:
file_length: file length. Must match what has been uploaded. If None,
it will be queried from GCS.
"""
if file_length is None:
file_length = self._get_offset_from_gcs() + 1
self._send_data('', 0, file_length)
def _check_open(self):
if self.closed:
raise IOError('Buffer is closed.')
def seekable(self):
return False
def readable(self):
return False
  def writable(self):
    """Writing is the only operation this upload buffer supports."""
    return True
| |
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for working with pianoroll sequences."""
from __future__ import division
import copy
from magenta.music import constants
from magenta.music import events_lib
from magenta.music import sequences_lib
from magenta.pipelines import statistics
from magenta.protobuf import music_pb2
import numpy as np
DEFAULT_STEPS_PER_QUARTER = constants.DEFAULT_STEPS_PER_QUARTER
MAX_MIDI_PITCH = 108 # Max piano pitch.
MIN_MIDI_PITCH = 21 # Min piano pitch.
STANDARD_PPQ = constants.STANDARD_PPQ
class PianorollSequence(events_lib.EventSequence):
  """Stores a polyphonic sequence as a pianoroll.

  Events are collections (tuples) of the pitches active at each step, offset
  from `min_pitch`.
  """

  def __init__(self, quantized_sequence=None, events_list=None,
               steps_per_quarter=None, start_step=0, min_pitch=MIN_MIDI_PITCH,
               max_pitch=MAX_MIDI_PITCH, split_repeats=True, shift_range=False):
    """Construct a PianorollSequence.

    Exactly one of `quantized_sequence` or `steps_per_quarter` must be supplied.
    At most one of `quantized_sequence` and `events_list` may be supplied.

    Args:
      quantized_sequence: an optional quantized NoteSequence proto to base
          PianorollSequence on.
      events_list: an optional list of Pianoroll events to base
          PianorollSequence on.
      steps_per_quarter: how many steps a quarter note represents. Must be
          provided if `quantized_sequence` not given.
      start_step: The offset of this sequence relative to the
          beginning of the source sequence. If a quantized sequence is used as
          input, only notes starting after this step will be considered.
      min_pitch: The minimum valid pitch value, inclusive.
      max_pitch: The maximum valid pitch value, inclusive.
      split_repeats: Whether to force repeated notes to have a 0-state step
          between them when initializing from a quantized NoteSequence.
      shift_range: If True, assume that the given events_list is in the full
          MIDI pitch range and needs to be shifted and filtered based on
          `min_pitch` and `max_pitch`.
    """
    assert (quantized_sequence, steps_per_quarter).count(None) == 1
    assert (quantized_sequence, events_list).count(None) >= 1

    self._min_pitch = min_pitch
    self._max_pitch = max_pitch

    if quantized_sequence:
      sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)
      self._events = self._from_quantized_sequence(quantized_sequence,
                                                   start_step, min_pitch,
                                                   max_pitch, split_repeats)
      self._steps_per_quarter = (
          quantized_sequence.quantization_info.steps_per_quarter)
    else:
      self._events = []
      self._steps_per_quarter = steps_per_quarter
      if events_list:
        for e in events_list:
          self.append(e, shift_range)
    self._start_step = start_step

  @property
  def start_step(self):
    return self._start_step

  @property
  def steps_per_quarter(self):
    return self._steps_per_quarter

  def set_length(self, steps, from_left=False):
    """Sets the length of the sequence to the specified number of steps.

    If the event sequence is not long enough, pads with empty (silent) steps to
    make the sequence the specified length. If it is too long, it will be
    truncated to the requested length.

    Args:
      steps: How many quantized steps long the event sequence should be.
      from_left: Whether to add/remove from the left instead of right.

    Raises:
      NotImplementedError: If `from_left` is True.
    """
    if from_left:
      raise NotImplementedError('from_left is not supported')
    # Pad on the right with empty frames, or truncate, as needed.
    if self.num_steps < steps:
      self._events += [()] * (steps - self.num_steps)
    elif self.num_steps > steps:
      del self._events[steps:]
    assert self.num_steps == steps

  def append(self, event, shift_range=False):
    """Appends the event to the end of the sequence.

    Args:
      event: The polyphonic event to append to the end.
      shift_range: If True, assume that the given event is in the full MIDI
          pitch range and needs to be shifted and filtered based on `min_pitch`
          and `max_pitch`.
    """
    if shift_range:
      # Drop out-of-range pitches and re-base the rest on `min_pitch`.
      event = tuple(p - self._min_pitch for p in event
                    if self._min_pitch <= p <= self._max_pitch)
    self._events.append(event)

  def __len__(self):
    """How many events are in this sequence.

    Returns:
      Number of events as an integer.
    """
    return len(self._events)

  def __getitem__(self, i):
    """Returns the event at the given index."""
    return self._events[i]

  def __iter__(self):
    """Return an iterator over the events in this sequence."""
    return iter(self._events)

  @property
  def end_step(self):
    return self.start_step + self.num_steps

  @property
  def num_steps(self):
    """Returns how many steps long this sequence is.

    Returns:
      Length of the sequence in quantized steps.
    """
    return len(self)

  @property
  def steps(self):
    """Returns a Python list of the time step at each event in this sequence."""
    return list(range(self.start_step, self.end_step))

  @staticmethod
  def _from_quantized_sequence(
      quantized_sequence, start_step, min_pitch, max_pitch, split_repeats):
    """Builds a pianoroll event list from the given quantized NoteSequence.

    Args:
      quantized_sequence: A quantized NoteSequence instance.
      start_step: Start converting the sequence at this time step.
          Assumed to be the beginning of a bar.
      min_pitch: The minimum valid pitch value, inclusive.
      max_pitch: The maximum valid pitch value, inclusive.
      split_repeats: Whether to force repeated notes to have a 0-state step
          between them.

    Returns:
      A list of events, one tuple of active pitch offsets per step.
    """
    # NOTE: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `bool` is the documented replacement.
    piano_roll = np.zeros(
        (quantized_sequence.total_quantized_steps - start_step,
         max_pitch - min_pitch + 1), bool)
    for note in quantized_sequence.notes:
      if note.quantized_start_step < start_step:
        continue
      if not min_pitch <= note.pitch <= max_pitch:
        continue
      note_pitch_offset = note.pitch - min_pitch
      note_start_offset = note.quantized_start_step - start_step
      note_end_offset = note.quantized_end_step - start_step
      # Guard against note_start_offset == 0: indexing row -1 would wrap
      # around and incorrectly clear the *last* frame of the roll.
      if split_repeats and note_start_offset > 0:
        piano_roll[note_start_offset - 1, note_pitch_offset] = 0
      piano_roll[note_start_offset:note_end_offset, note_pitch_offset] = 1
    events = [tuple(np.where(frame)[0]) for frame in piano_roll]
    return events

  def to_sequence(self,
                  velocity=100,
                  instrument=0,
                  program=0,
                  qpm=constants.DEFAULT_QUARTERS_PER_MINUTE,
                  base_note_sequence=None):
    """Converts the PianorollSequence to NoteSequence proto.

    Args:
      velocity: Midi velocity to give each note. Between 1 and 127 (inclusive).
      instrument: Midi instrument to give each note.
      program: Midi program to give each note.
      qpm: Quarter notes per minute (float).
      base_note_sequence: A NoteSequence to use a starting point. Must match
          the specified qpm.

    Raises:
      ValueError: if `base_note_sequence` is supplied and its tempo does not
          match `qpm`.

    Returns:
      A NoteSequence proto.
    """
    seconds_per_step = 60.0 / qpm / self._steps_per_quarter
    sequence_start_time = self.start_step * seconds_per_step

    if base_note_sequence:
      sequence = copy.deepcopy(base_note_sequence)
      if sequence.tempos[0].qpm != qpm:
        raise ValueError(
            'Supplied QPM (%d) does not match QPM of base_note_sequence (%d)'
            % (qpm, sequence.tempos[0].qpm))
    else:
      sequence = music_pb2.NoteSequence()
      sequence.tempos.add().qpm = qpm
      sequence.ticks_per_quarter = STANDARD_PPQ

    step = 0
    # Maps pitch offset -> its currently-sounding (not yet ended) Note proto.
    open_notes = {}
    for step, event in enumerate(self):
      frame_pitches = set(event)
      open_pitches = set(open_notes)

      # Close any open note whose pitch is absent from this frame.
      for pitch_to_close in open_pitches - frame_pitches:
        note_to_close = open_notes[pitch_to_close]
        note_to_close.end_time = step * seconds_per_step + sequence_start_time
        del open_notes[pitch_to_close]

      # Open a new note for any pitch newly active in this frame.
      for pitch_to_open in frame_pitches - open_pitches:
        new_note = sequence.notes.add()
        new_note.start_time = step * seconds_per_step + sequence_start_time
        new_note.pitch = pitch_to_open + self._min_pitch
        new_note.velocity = velocity
        new_note.instrument = instrument
        new_note.program = program
        open_notes[pitch_to_open] = new_note

    # If notes are still sounding after the last frame, extend the sequence by
    # one step so they get a nonzero duration before being closed.
    final_step = step + (len(open_notes) > 0)  # pylint: disable=g-explicit-length-test
    for note_to_close in open_notes.values():
      note_to_close.end_time = (
          final_step * seconds_per_step + sequence_start_time)

    sequence.total_time = seconds_per_step * final_step + sequence_start_time
    if sequence.notes:
      assert sequence.total_time >= sequence.notes[-1].end_time

    return sequence
def extract_pianoroll_sequences(
    quantized_sequence, start_step=0, min_steps_discard=None,
    max_steps_discard=None, max_steps_truncate=None):
  """Extracts a polyphonic track from the given quantized NoteSequence.

  Currently, this extracts only one pianoroll from a given track.

  Args:
    quantized_sequence: A quantized NoteSequence.
    start_step: Start extracting a sequence at this time step. Assumed
        to be the beginning of a bar.
    min_steps_discard: Minimum length of tracks in steps. Shorter tracks are
        discarded.
    max_steps_discard: Maximum length of tracks in steps. Longer tracks are
        discarded. Mutually exclusive with `max_steps_truncate`.
    max_steps_truncate: Maximum length of tracks in steps. Longer tracks are
        truncated. Mutually exclusive with `max_steps_discard`.

  Returns:
    pianoroll_seqs: A python list of PianorollSequence instances (at most one).
    stats: The `statistics.Statistic` objects collected during extraction
        (the values view of the internal stats dictionary).

  Raises:
    ValueError: If both `max_steps_discard` and `max_steps_truncate` are
        specified.
  """
  if (max_steps_discard, max_steps_truncate).count(None) == 0:
    raise ValueError(
        'Only one of `max_steps_discard` and `max_steps_truncate` can be '
        'specified.')
  sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)

  stats = dict((stat_name, statistics.Counter(stat_name)) for stat_name in
               ['pianoroll_tracks_truncated_too_long',
                'pianoroll_tracks_discarded_too_short',
                'pianoroll_tracks_discarded_too_long',
                'pianoroll_tracks_discarded_more_than_1_program'])

  steps_per_bar = sequences_lib.steps_per_bar_in_quantized_sequence(
      quantized_sequence)

  # Create a histogram measuring lengths (in bars not steps).
  stats['pianoroll_track_lengths_in_bars'] = statistics.Histogram(
      'pianoroll_track_lengths_in_bars',
      [0, 1, 10, 20, 30, 40, 50, 100, 200, 500, 1000])

  # Allow only 1 program: multi-program tracks are discarded wholesale.
  programs = set()
  for note in quantized_sequence.notes:
    programs.add(note.program)
  if len(programs) > 1:
    stats['pianoroll_tracks_discarded_more_than_1_program'].increment()
    return [], stats.values()

  # Translate the quantized sequence into a PianorollSequence.
  pianoroll_seq = PianorollSequence(quantized_sequence=quantized_sequence,
                                    start_step=start_step)

  pianoroll_seqs = []
  num_steps = pianoroll_seq.num_steps

  # Apply the length filters: discard short, discard or truncate long.
  if min_steps_discard is not None and num_steps < min_steps_discard:
    stats['pianoroll_tracks_discarded_too_short'].increment()
  elif max_steps_discard is not None and num_steps > max_steps_discard:
    stats['pianoroll_tracks_discarded_too_long'].increment()
  else:
    if max_steps_truncate is not None and num_steps > max_steps_truncate:
      stats['pianoroll_tracks_truncated_too_long'].increment()
      pianoroll_seq.set_length(max_steps_truncate)
    pianoroll_seqs.append(pianoroll_seq)
    stats['pianoroll_track_lengths_in_bars'].increment(
        num_steps // steps_per_bar)
  return pianoroll_seqs, stats.values()
| |
import random
from BattleBase import *
import DistributedBattleBase
import MovieUtil
import SuitBattleGlobals
from direct.actor import Actor
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import State
from direct.interval.IntervalGlobal import *
from otp.avatar import Emote
from otp.nametag import NametagGlobals
from otp.nametag.NametagConstants import *
from pandac.PandaModules import *
from toontown.suit import Suit
from toontown.suit import SuitDNA
from toontown.toon import TTEmote
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
class DistributedBattleBldg(DistributedBattleBase.DistributedBattleBase):
    """Client-side battle inside a suit building.

    Extends the base battle with an extra 'BuildingReward' FSM state (the
    end-of-building reward movie), indoor battle music, and a face-off
    camera sequence aimed at the suit leader.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedBattleBldg')
    # Face-off camera settings: narrow FOV, positioned behind/above the leader.
    camFOFov = 30.0
    camFOPos = Point3(0, -10, 4)

    def __init__(self, cr):
        """Set up the base battle and register the 'BuildingReward' state."""
        townBattle = cr.playGame.getPlace().townBattle
        DistributedBattleBase.DistributedBattleBase.__init__(self, cr, townBattle)
        self.streetBattle = 0
        self.fsm.addState(State.State('BuildingReward', self.enterBuildingReward, self.exitBuildingReward, ['Resume']))
        offState = self.fsm.getStateNamed('Off')
        offState.addTransition('BuildingReward')
        playMovieState = self.fsm.getStateNamed('PlayMovie')
        playMovieState.addTransition('BuildingReward')

    def generate(self):
        DistributedBattleBase.DistributedBattleBase.generate(self)

    def setBossBattle(self, value):
        """Distributed field setter; also starts the matching indoor music."""
        self.bossBattle = value
        if self.bossBattle:
            self.battleMusic = base.loadMusic('phase_7/audio/bgm/encntr_suit_winning_indoor.ogg')
        else:
            self.battleMusic = base.loadMusic('phase_7/audio/bgm/encntr_general_bg_indoor.ogg')
        base.playMusic(self.battleMusic, looping=1, volume=0.9)

    def getBossBattleTaunt(self):
        return TTLocalizer.BattleBldgBossTaunt

    def disable(self):
        DistributedBattleBase.DistributedBattleBase.disable(self)
        self.battleMusic.stop()

    def delete(self):
        DistributedBattleBase.DistributedBattleBase.delete(self)
        del self.battleMusic

    def buildJoinPointList(self, avPos, destPos, toon = 0):
        # Indoors there are no join points; toons walk straight to position.
        return []

    def __faceOff(self, ts, name, callback):
        """Builds and starts the face-off intro: the suit leader taunts while
        suits, toons and the camera move into battle positions."""
        if len(self.suits) == 0:
            self.notify.warning('__faceOff(): no suits.')
            return
        if len(self.toons) == 0:
            self.notify.warning('__faceOff(): no toons.')
            return
        elevatorPos = self.toons[0].getPos()
        # Pick the taunting leader: sole suit, the boss (index 1), or the
        # highest-tier suit present.
        if len(self.suits) == 1:
            leaderIndex = 0
        elif self.bossBattle == 1:
            leaderIndex = 1
        else:
            maxTypeNum = -1
            for suit in self.suits:
                suitTypeNum = SuitDNA.getSuitType(suit.dna.name)
                if maxTypeNum < suitTypeNum:
                    maxTypeNum = suitTypeNum
                    leaderIndex = self.suits.index(suit)
        delay = FACEOFF_TAUNT_T
        suitTrack = Parallel()
        suitLeader = None
        for suit in self.suits:
            suit.setState('Battle')
            suitIsLeader = 0
            oneSuitTrack = Sequence()
            oneSuitTrack.append(Func(suit.loop, 'neutral'))
            oneSuitTrack.append(Func(suit.headsUp, elevatorPos))
            if self.suits.index(suit) == leaderIndex:
                suitLeader = suit
                suitIsLeader = 1
                if self.bossBattle == 1:
                    taunt = self.getBossBattleTaunt()
                else:
                    taunt = SuitBattleGlobals.getFaceoffTaunt(suit.getStyleName(), suit.doId)
                oneSuitTrack.append(Func(suit.setChatAbsolute, taunt, CFSpeech | CFTimeout))
            destPos, destHpr = self.getActorPosHpr(suit, self.suits)
            oneSuitTrack.append(Wait(delay))
            if suitIsLeader == 1:
                oneSuitTrack.append(Func(suit.clearChat))
            oneSuitTrack.append(self.createAdjustInterval(suit, destPos, destHpr))
            suitTrack.append(oneSuitTrack)
        toonTrack = Parallel()
        for toon in self.toons:
            oneToonTrack = Sequence()
            destPos, destHpr = self.getActorPosHpr(toon, self.toons)
            oneToonTrack.append(Wait(delay))
            oneToonTrack.append(self.createAdjustInterval(toon, destPos, destHpr, toon=1, run=1))
            toonTrack.append(oneToonTrack)
        camTrack = Sequence()

        def setCamFov(fov):
            # setMinFov takes a horizontal FOV; convert from vertical via 4:3.
            base.camLens.setMinFov(fov/(4./3.))

        camTrack.append(Func(camera.wrtReparentTo, suitLeader))
        camTrack.append(Func(setCamFov, self.camFOFov))
        suitHeight = suitLeader.getHeight()
        suitOffsetPnt = Point3(0, 0, suitHeight)
        MidTauntCamHeight = suitHeight * 0.66
        MidTauntCamHeightLim = suitHeight - 1.8
        if MidTauntCamHeight < MidTauntCamHeightLim:
            MidTauntCamHeight = MidTauntCamHeightLim
        TauntCamY = 18
        TauntCamX = 0
        # Randomly pick a low, high, or mid-height taunt camera angle.
        TauntCamHeight = random.choice((MidTauntCamHeight, 1, 11))
        camTrack.append(Func(camera.setPos, TauntCamX, TauntCamY, TauntCamHeight))
        camTrack.append(Func(camera.lookAt, suitLeader, suitOffsetPnt))
        camTrack.append(Wait(delay))
        camPos = Point3(0, -6, 4)
        camHpr = Vec3(0, 0, 0)
        camTrack.append(Func(camera.reparentTo, base.localAvatar))
        camTrack.append(Func(setCamFov, ToontownGlobals.DefaultCameraFov))
        camTrack.append(Func(camera.setPosHpr, camPos, camHpr))
        mtrack = Parallel(suitTrack, toonTrack, camTrack)
        done = Func(callback)
        track = Sequence(mtrack, done, name=name)
        track.start(ts)
        self.storeInterval(track, name)
        return

    def enterFaceOff(self, ts):
        if len(self.toons) > 0 and base.localAvatar == self.toons[0]:
            Emote.globalEmote.disableAll(self.toons[0], 'dbattlebldg, enterFaceOff')
        self.delayDeleteMembers()
        self.__faceOff(ts, self.faceOffName, self.__handleFaceOffDone)
        return None

    def __handleFaceOffDone(self):
        self.notify.debug('FaceOff done')
        self.d_faceOffDone(base.localAvatar.doId)

    def exitFaceOff(self):
        self.notify.debug('exitFaceOff()')
        if len(self.toons) > 0 and base.localAvatar == self.toons[0]:
            Emote.globalEmote.releaseAll(self.toons[0], 'dbattlebldg exitFaceOff')
        self.clearInterval(self.faceOffName)
        self._removeMembersKeep()
        camera.wrtReparentTo(self)
        base.camLens.setMinFov(self.camFov/(4./3.))
        return None

    def __playReward(self, ts, callback):
        """Plays the per-floor victory dance for all toons."""
        toonTracks = Parallel()
        for toon in self.toons:
            toonTracks.append(Sequence(Func(toon.loop, 'victory'), Wait(FLOOR_REWARD_TIMEOUT), Func(toon.loop, 'neutral')))
        name = self.uniqueName('floorReward')
        track = Sequence(toonTracks, Func(callback), name=name)
        camera.setPos(0, 0, 1)
        camera.setHpr(180, 10, 0)
        self.storeInterval(track, name)
        track.start(ts)

    def enterReward(self, ts):
        self.notify.debug('enterReward()')
        self.delayDeleteMembers()
        self.__playReward(ts, self.__handleFloorRewardDone)
        return None

    def __handleFloorRewardDone(self):
        return None

    def exitReward(self):
        self.notify.debug('exitReward()')
        self.clearInterval(self.uniqueName('floorReward'))
        self._removeMembersKeep()
        NametagGlobals.setMasterArrowsOn(1)
        for toon in self.toons:
            toon.startSmooth()
        return None

    def enterBuildingReward(self, ts):
        """End-of-building reward movie; unskippable (noSkip=True)."""
        self.delayDeleteMembers()
        if self.hasLocalToon():
            NametagGlobals.setMasterArrowsOn(0)
        self.movie.playReward(ts, self.uniqueName('building-reward'), self.__handleBuildingRewardDone, noSkip=True)
        return None

    def __handleBuildingRewardDone(self):
        if self.hasLocalToon():
            self.d_rewardDone(base.localAvatar.doId)
        self.movie.resetReward()
        self.fsm.request('Resume')

    def exitBuildingReward(self):
        self.movie.resetReward(finish=1)
        self._removeMembersKeep()
        NametagGlobals.setMasterArrowsOn(1)
        return None

    def enterResume(self, ts = 0):
        if self.hasLocalToon():
            self.removeLocalToon()
        return None

    def exitResume(self):
        return None
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""User-facing Tuning API"""
import logging
import os.path
from typing import Callable, Dict, List, Optional, Union
import tvm
from tvm import relay
from tvm._ffi import register_func
from tvm.ir import IRModule, structural_equal, structural_hash
from tvm.relay import Function as RelayFunc
from tvm.runtime import Module, NDArray
from tvm.target import Target
from tvm.te import Tensor, create_prim_func
from tvm.tir import PrimFunc, Schedule
from .builder import Builder, LocalBuilder
from .cost_model import CostModel, XGBModel
from .database import Database, JSONDatabase, TuningRecord
from .feature_extractor import PerStoreFeature
from .integration import ApplyHistoryBest, extract_task_from_relay
from .measure_callback import MeasureCallback
from .mutator import Mutator
from .postproc import Postproc
from .runner import LocalRunner, Runner
from .schedule_rule import ScheduleRule
from .search_strategy import (
EvolutionarySearchConfig,
ReplayFuncConfig,
ReplayTraceConfig,
)
from .space_generator import PostOrderApply, SpaceGenerator
from .task_scheduler import RoundRobin, TaskScheduler
from .tune_context import TuneContext
logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
# Union of the search-strategy configs accepted by the tuning entry points.
SearchStrategyConfig = Union[
    ReplayFuncConfig,
    ReplayTraceConfig,
    EvolutionarySearchConfig,
]
# Factory callbacks: users supply these to lazily construct custom components.
FnSpaceGenerator = Callable[[], SpaceGenerator]
FnScheduleRule = Callable[[], List[ScheduleRule]]
FnPostproc = Callable[[], List[Postproc]]
FnMutatorProb = Callable[[], Dict[Mutator, float]]
# Factory producing a TaskScheduler from the fully-parsed tuning components.
FnTaskScheduler = Callable[
    [
        List[TuneContext],
        Builder,
        Runner,
        Database,
        CostModel,
        List[MeasureCallback],
    ],
    TaskScheduler,
]
class DefaultLLVM:
    """Default tuning configuration for LLVM (CPU targets)."""

    @staticmethod
    def _sch_rules() -> List[ScheduleRule]:
        """Default CPU schedule rules: inline, rfactor, tiling, unrolling."""
        from tvm.meta_schedule import (  # pylint: disable=import-outside-toplevel
            schedule_rule as M,
        )

        return [
            M.AutoInline(
                into_producer=False,
                into_consumer=True,
                inline_const_tensor=True,
                disallow_if_then_else=True,
                require_injective=True,
                require_ordered=True,
                disallow_op=["tir.exp"],
            ),
            M.AddRFactor(max_jobs_per_core=16, max_innermost_factor=64),
            M.MultiLevelTiling(
                structure="SSRSRS",
                tile_binds=None,
                max_innermost_factor=64,
                vector_load_lens=None,
                reuse_read=None,
                reuse_write=M.ReuseType(
                    req="may",
                    levels=[1, 2],
                    scope="global",
                ),
            ),
            M.ParallelizeVectorizeUnroll(
                max_jobs_per_core=16,
                max_vectorize_extent=64,
                unroll_max_steps=[0, 16, 64, 512],
                unroll_explicit=True,
            ),
            M.RandomComputeLocation(),
        ]

    @staticmethod
    def _postproc() -> List[Postproc]:
        """Default CPU postprocessors applied after sampling a schedule."""
        from tvm.meta_schedule import (  # pylint: disable=import-outside-toplevel
            postproc as M,
        )

        return [
            M.DisallowDynamicLoop(),
            M.RewriteParallelVectorizeUnroll(),
            M.RewriteReductionBlock(),
        ]

    @staticmethod
    def _mutator_probs() -> Dict[Mutator, float]:
        """Default CPU mutators with their selection probabilities."""
        from tvm.meta_schedule import (  # pylint: disable=import-outside-toplevel
            mutator as M,
        )

        return {
            M.MutateTileSize(): 0.9,
            M.MutateComputeLocation(): 0.05,
            M.MutateUnroll(): 0.03,
            M.MutateParallel(max_jobs_per_core=16): 0.02,
        }
class DefaultCUDA:
    """Default tuning configuration for CUDA (GPU targets)."""

    @staticmethod
    def _sch_rules() -> List[ScheduleRule]:
        """Default GPU schedule rules: thread-bound tiling, inlining,
        cross-thread reduction, and explicit unrolling."""
        from tvm.meta_schedule import (  # pylint: disable=import-outside-toplevel
            schedule_rule as M,
        )

        return [
            M.MultiLevelTiling(
                structure="SSSRRSRS",
                tile_binds=["blockIdx.x", "vthread.x", "threadIdx.x"],
                max_innermost_factor=64,
                vector_load_lens=[1, 2, 3, 4],
                reuse_read=M.ReuseType(
                    req="must",
                    levels=[4],
                    scope="shared",
                ),
                reuse_write=M.ReuseType(
                    req="must",
                    levels=[3],
                    scope="local",
                ),
            ),
            M.AutoInline(
                into_producer=True,
                into_consumer=True,
                # into_cache_only=False,
                inline_const_tensor=True,
                disallow_if_then_else=False,
                require_injective=False,
                require_ordered=False,
                disallow_op=None,
            ),
            M.CrossThreadReduction(thread_extents=[4, 8, 16, 32, 64, 128, 256, 512]),
            M.ParallelizeVectorizeUnroll(
                max_jobs_per_core=-1,  # disable parallelize
                max_vectorize_extent=-1,  # disable vectorize
                unroll_max_steps=[0, 16, 64, 512, 1024],
                unroll_explicit=True,
            ),
        ]

    @staticmethod
    def _postproc() -> List[Postproc]:
        """Default GPU postprocessors, including GPU validity verification."""
        from tvm.meta_schedule import (  # pylint: disable=import-outside-toplevel
            postproc as M,
        )

        return [
            M.DisallowDynamicLoop(),
            M.RewriteCooperativeFetch(),
            M.RewriteUnboundBlock(),
            M.RewriteParallelVectorizeUnroll(),
            M.RewriteReductionBlock(),
            M.VerifyGPUCode(),
        ]

    @staticmethod
    def _mutator_probs() -> Dict[Mutator, float]:
        """Default GPU mutators with their selection probabilities."""
        from tvm.meta_schedule import (  # pylint: disable=import-outside-toplevel
            mutator as M,
        )

        return {
            # M.MutateTileSize(): 0.9,
            M.MutateUnroll(): 0.1,
        }
class Parse:
    """Parse tuning configuration from user inputs.

    Each static method normalizes one optional user-supplied component,
    substituting a sensible default when the input is None and raising
    TypeError on anything of the wrong type.
    """

    @staticmethod
    @register_func("tvm.meta_schedule.tune.parse_mod")  # for use in ApplyHistoryBest
    def _mod(mod: Union[PrimFunc, IRModule]) -> IRModule:
        """Normalize `mod` into an IRModule whose single function is "main"."""
        if isinstance(mod, PrimFunc):
            mod = mod.with_attr("global_symbol", "main")
            mod = mod.with_attr("tir.noalias", True)
            mod = IRModule({"main": mod})
        if not isinstance(mod, IRModule):
            raise TypeError(f"Expected `mod` to be PrimFunc or IRModule, but gets: {mod}")
        # In order to make sure the mod can be found in ApplyHistoryBest,
        # a lone function must be named "main": a different func name causes
        # structural inequality with the extracted workload.
        func_names = mod.get_global_vars()
        if len(func_names) == 1:
            # Only unpack after the length check: unpacking unconditionally
            # (as the original code did) raises ValueError for any module
            # containing more than one global function.
            (func_name,) = func_names
            if func_name != "main":
                mod = IRModule({"main": mod[func_name]})
        return mod

    @staticmethod
    def _target(target: Union[str, Target]) -> Target:
        """Coerce a target string into a Target object."""
        if isinstance(target, str):
            target = Target(target)
        if not isinstance(target, Target):
            raise TypeError(f"Expected `target` to be str or Target, but gets: {target}")
        return target

    @staticmethod
    def _builder(builder: Optional[Builder]) -> Builder:
        """Default to a LocalBuilder when none is given."""
        if builder is None:
            builder = LocalBuilder()
        if not isinstance(builder, Builder):
            raise TypeError(f"Expected `builder` to be Builder, but gets: {builder}")
        return builder

    @staticmethod
    def _runner(runner: Optional[Runner]) -> Runner:
        """Default to a LocalRunner when none is given."""
        if runner is None:
            runner = LocalRunner()
        if not isinstance(runner, Runner):
            raise TypeError(f"Expected `runner` to be Runner, but gets: {runner}")
        return runner

    @staticmethod
    def _database(database: Union[None, Database], task_name: str, path: str) -> Database:
        """Default to a JSONDatabase stored under `path` when none is given."""
        if database is None:
            path_workload = os.path.join(path, f"{task_name}_database_workload.json")
            path_tuning_record = os.path.join(path, f"{task_name}_database_tuning_record.json")
            logger.info(
                "Creating JSONDatabase. Workload at: %s. Tuning records at: %s",
                path_workload,
                path_tuning_record,
            )
            database = JSONDatabase(
                path_workload=path_workload,
                path_tuning_record=path_tuning_record,
            )
        if not isinstance(database, Database):
            raise TypeError(f"Expected `database` to be Database, but gets: {database}")
        return database

    @staticmethod
    def _callbacks(
        measure_callbacks: Optional[List[MeasureCallback]],
    ) -> List[MeasureCallback]:
        """Default to the standard callback pipeline when none is given."""
        if measure_callbacks is None:
            from tvm.meta_schedule import (  # pylint: disable=import-outside-toplevel
                measure_callback as M,
            )

            return [
                M.AddToDatabase(),
                M.RemoveBuildArtifact(),
                M.EchoStatistics(),
                M.UpdateCostModel(),
            ]
        if not isinstance(measure_callbacks, (list, tuple)):
            raise TypeError(
                f"Expected `measure_callbacks` to be List[MeasureCallback], "
                f"but gets: {measure_callbacks}"
            )
        measure_callbacks = list(measure_callbacks)
        for i, callback in enumerate(measure_callbacks):
            if not isinstance(callback, MeasureCallback):
                raise TypeError(
                    f"Expected `measure_callbacks` to be List[MeasureCallback], "
                    f"but measure_callbacks[{i}] is: {callback}"
                )
        return measure_callbacks

    @staticmethod
    def _cost_model(cost_model: Optional[CostModel]) -> CostModel:
        """Default to an XGBoost cost model when none is given."""
        if cost_model is None:
            return XGBModel(extractor=PerStoreFeature())
        if not isinstance(cost_model, CostModel):
            raise TypeError(f"Expected `cost_model` to be CostModel, but gets: {cost_model}")
        return cost_model

    @staticmethod
    def _space_generator(space_generator: Optional[FnSpaceGenerator]) -> SpaceGenerator:
        """Default to PostOrderApply; invoke a factory callback if given."""
        if space_generator is None:
            return PostOrderApply()
        if callable(space_generator):
            space_generator = space_generator()
        if not isinstance(space_generator, SpaceGenerator):
            raise TypeError(
                f"Expected `space_generator` to return SpaceGenerator, "
                f"but gets: {space_generator}"
            )
        return space_generator

    @staticmethod
    def _sch_rules(sch_rules: Optional[FnScheduleRule], target: Target) -> List[ScheduleRule]:
        """Invoke the factory if given; otherwise use target-specific defaults."""
        if callable(sch_rules):
            return sch_rules()
        if sch_rules is not None:
            raise TypeError(f"Expected `sch_rules` to be None or callable, but gets: {sch_rules}")
        # pylint: disable=protected-access
        if target.kind.name == "llvm":
            return DefaultLLVM._sch_rules()
        if target.kind.name == "cuda":
            return DefaultCUDA._sch_rules()
        # pylint: enable=protected-access
        raise ValueError(f"Unsupported target: {target}")

    @staticmethod
    def _postproc(postproc: Optional[FnPostproc], target: Target) -> List[Postproc]:
        """Invoke the factory if given; otherwise use target-specific defaults."""
        if callable(postproc):
            return postproc()
        if postproc is not None:
            raise TypeError(f"Expected `postproc` to be None or callable, but gets: {postproc}")
        # pylint: disable=protected-access
        if target.kind.name == "llvm":
            return DefaultLLVM._postproc()
        if target.kind.name == "cuda":
            return DefaultCUDA._postproc()
        # pylint: enable=protected-access
        raise ValueError(f"Unsupported target: {target}")

    @staticmethod
    def _mutator_probs(
        mutator_probs: Optional[FnMutatorProb],
        target: Target,
    ) -> Dict[Mutator, float]:
        """Invoke the factory if given; otherwise use target-specific defaults."""
        if callable(mutator_probs):
            return mutator_probs()
        if mutator_probs is not None:
            raise TypeError(
                f"Expected `mutator_probs` to be None or callable, but gets: {mutator_probs}"
            )
        # pylint: disable=protected-access
        if target.kind.name == "llvm":
            return DefaultLLVM._mutator_probs()
        if target.kind.name == "cuda":
            return DefaultCUDA._mutator_probs()
        # pylint: enable=protected-access
        raise ValueError(f"Unsupported target: {target}")

    @staticmethod
    def _tune_context(
        tune_context: Optional[TuneContext],
        mod: IRModule,
        target: Target,
        config: SearchStrategyConfig,
        task_name: str,
        space_generator: Optional[FnSpaceGenerator],
        sch_rules: Optional[FnScheduleRule],
        postprocs: Optional[FnPostproc],
        mutator_probs: Optional[FnMutatorProb],
        num_threads: Optional[int],
    ) -> TuneContext:
        """Assemble a TuneContext from the parsed components when none is given."""
        if tune_context is None:
            return TuneContext(
                mod=mod,
                target=target,
                # pylint: disable=protected-access
                space_generator=Parse._space_generator(space_generator),
                search_strategy=config.create_strategy(),
                sch_rules=Parse._sch_rules(sch_rules, target),
                postprocs=Parse._postproc(postprocs, target),
                mutator_probs=Parse._mutator_probs(mutator_probs, target),
                # pylint: enable=protected-access
                task_name=task_name,
                rand_state=-1,
                num_threads=num_threads,
            )
        if not isinstance(tune_context, TuneContext):
            raise TypeError(f"Expected `tune_context` to be TuneContext, but gets: {tune_context}")
        return tune_context

    @staticmethod
    def _task_scheduler(
        task_scheduler: Union[None, TaskScheduler, FnTaskScheduler],
        tasks: List[TuneContext],
        builder: Builder,
        runner: Runner,
        database: Database,
        cost_model: CostModel,
        measure_callbacks: List[MeasureCallback],
    ):
        """Default to RoundRobin; accept a ready scheduler or a factory callback."""
        if task_scheduler is None:
            return RoundRobin(
                tasks=tasks,
                builder=builder,
                runner=runner,
                database=database,
                cost_model=cost_model,
                measure_callbacks=measure_callbacks,
            )
        if callable(task_scheduler):
            return task_scheduler(
                tasks,
                builder,
                runner,
                database,
                cost_model,
                measure_callbacks,
            )
        if not isinstance(task_scheduler, TaskScheduler):
            raise TypeError(
                f"Expected `task_scheduler` to be TaskScheduler, but gets: {task_scheduler}"
            )
        return task_scheduler
def tune_tir(
    mod: Union[IRModule, PrimFunc],
    target: Union[str, Target],
    config: SearchStrategyConfig,
    work_dir: str,
    *,
    task_name: str = "main",
    builder: Optional[Builder] = None,
    runner: Optional[Runner] = None,
    database: Optional[Database] = None,
    cost_model: Optional[CostModel] = None,
    measure_callbacks: Optional[List[MeasureCallback]] = None,
    task_scheduler: Optional[TaskScheduler] = None,
    space: Optional[FnSpaceGenerator] = None,
    sch_rules: Optional[FnScheduleRule] = None,
    postprocs: Optional[FnPostproc] = None,
    mutator_probs: Optional[FnMutatorProb] = None,
    num_threads: Optional[int] = None,
) -> Optional[Schedule]:
    """Tune a TIR IRModule with a given target.

    Parameters
    ----------
    mod : Union[IRModule, PrimFunc]
        The module to tune.
    target : Union[str, Target]
        The target to tune for.
    config : SearchStrategyConfig
        The search strategy config.
    work_dir : str
        The working directory to save intermediate results.
    task_name : str
        The name of the task.
    builder : Optional[Builder]
        The builder to use; defaults to a LocalBuilder.
    runner : Optional[Runner]
        The runner to use; defaults to a LocalRunner.
    database : Optional[Database]
        The database to use; defaults to a JSONDatabase under `work_dir`.
    cost_model : Optional[CostModel]
        The cost model to use; defaults to an XGBoost model.
    measure_callbacks : Optional[List[MeasureCallback]]
        The callbacks used during tuning.
    task_scheduler : Optional[TaskScheduler]
        The task scheduler to use; defaults to RoundRobin.
    space : Optional[FnSpaceGenerator]
        The design-space generator, or a factory returning one.
    sch_rules : Optional[FnScheduleRule]
        Factory returning the schedule rules; target-specific defaults if None.
    postprocs : Optional[FnPostproc]
        Factory returning the postprocessors; target-specific defaults if None.
    mutator_probs : Optional[FnMutatorProb]
        Factory returning mutator probabilities; target defaults if None.
    num_threads : Optional[int]
        The number of threads to use during tuning.

    Returns
    -------
    sch : Optional[Schedule]
        The best schedule found, or None if the database has no record for
        the workload.
    """
    logger.info("Working directory: %s", work_dir)
    # pylint: disable=protected-access
    mod = Parse._mod(mod)
    database = Parse._database(database, task_name, work_dir)
    tune_context = Parse._tune_context(
        tune_context=None,
        mod=mod,
        target=Parse._target(target),
        config=config,
        task_name=task_name,
        space_generator=space,
        sch_rules=sch_rules,
        postprocs=postprocs,
        mutator_probs=mutator_probs,
        num_threads=num_threads,
    )
    task_scheduler = Parse._task_scheduler(
        task_scheduler,
        [tune_context],
        builder=Parse._builder(builder),
        runner=Parse._runner(runner),
        database=database,
        cost_model=Parse._cost_model(cost_model),
        measure_callbacks=Parse._callbacks(measure_callbacks),
    )
    # pylint: enable=protected-access
    task_scheduler.tune()
    # Replay the best recorded trace (if any) onto a fresh schedule.
    bests: List[TuningRecord] = database.get_top_k(
        database.commit_workload(mod),
        top_k=1,
    )
    if not bests:
        return None
    assert len(bests) == 1
    sch = Schedule(mod)
    bests[0].trace.apply_to_schedule(sch, remove_postproc=False)
    task_scheduler.cost_model.save(os.path.join(work_dir, f"{task_name}.xgb"))
    return sch
def tune_te(
    tensors: List[Tensor],
    target: Union[str, Target],
    config: SearchStrategyConfig,
    work_dir: str,
    *,
    task_name: str = "main",
    builder: Optional[Builder] = None,
    runner: Optional[Runner] = None,
    database: Optional[Database] = None,
    cost_model: Optional[CostModel] = None,
    measure_callbacks: Optional[List[MeasureCallback]] = None,
    task_scheduler: Optional[TaskScheduler] = None,
    space: Optional[FnSpaceGenerator] = None,
    sch_rules: Optional[FnScheduleRule] = None,
    postprocs: Optional[FnPostproc] = None,
    mutator_probs: Optional[FnMutatorProb] = None,
    num_threads: Optional[int] = None,
) -> Optional[Schedule]:
    """Tune a TE compute DAG with a given target.

    Parameters
    ----------
    tensors : List[Tensor]
        The list of input/output tensors of the TE compute DAG.
    target : Union[str, Target]
        The target to tune for.
    config : SearchStrategyConfig
        The search strategy config.
    work_dir : str
        The working directory to save intermediate results.
    task_name : str
        The name of the task.
    builder : Optional[Builder]
        The builder to use.
    runner : Optional[Runner]
        The runner to use.
    database : Optional[Database]
        The database to use.
    cost_model : Optional[CostModel]
        The cost model to use.
    measure_callbacks : Optional[List[MeasureCallback]]
        The callbacks used during tuning.
    task_scheduler : Optional[TaskScheduler]
        The task scheduler to use.
    space : Optional[FnSpaceGenerator]
        The space generator to use.
    sch_rules : Optional[FnScheduleRule]
        The schedule rules to use.
    postprocs : Optional[FnPostproc]
        The postprocessors to use.
    mutator_probs : Optional[FnMutatorProb]
        The probabilities of the mutators to use.
    num_threads : Optional[int]
        The number of threads to use.

    Returns
    -------
    sch : Optional[Schedule]
        The tuned schedule, or None if tuning found no valid record.
    """
    # Lower the TE compute DAG to a TIR PrimFunc and delegate all the actual
    # tuning work to the TIR entry point with identical settings.
    return tune_tir(
        mod=create_prim_func(tensors),
        target=target,
        config=config,
        work_dir=work_dir,
        task_name=task_name,
        builder=builder,
        runner=runner,
        database=database,
        cost_model=cost_model,
        measure_callbacks=measure_callbacks,
        task_scheduler=task_scheduler,
        space=space,
        sch_rules=sch_rules,
        postprocs=postprocs,
        mutator_probs=mutator_probs,
        num_threads=num_threads,
    )
def tune_relay(
    mod: Union[RelayFunc, IRModule],
    target: Union[str, Target],
    config: SearchStrategyConfig,
    work_dir: str,
    *,
    params: Optional[Dict[str, NDArray]] = None,
    task_name: str = "main",
    builder: Optional[Builder] = None,
    runner: Optional[Runner] = None,
    database: Optional[Database] = None,
    cost_model: Optional[CostModel] = None,
    measure_callbacks: Optional[List[MeasureCallback]] = None,
    task_scheduler: Optional[TaskScheduler] = None,
    space: Optional[FnSpaceGenerator] = None,
    sch_rules: Optional[FnScheduleRule] = None,
    postprocs: Optional[FnPostproc] = None,
    mutator_probs: Optional[FnMutatorProb] = None,
    num_threads: Optional[int] = None,
) -> Module:
    """Tune a Relay program with a given target, then build it with the best
    schedules found.

    Parameters
    ----------
    mod : Union[RelayFunc, IRModule]
        The Relay program to tune.
    target : Union[str, Target]
        The target to tune for.
    config : SearchStrategyConfig
        The search strategy config.
    work_dir : str
        The working directory to save intermediate results.
    params : Optional[Dict[str, tvm.runtime.NDArray]]
        The associated parameters of the program.
    task_name : str
        The name of the task.
    builder : Optional[Builder]
        The builder to use.
    runner : Optional[Runner]
        The runner to use.
    database : Optional[Database]
        The database to use.
    cost_model : Optional[CostModel]
        The cost model to use.
    measure_callbacks : Optional[List[MeasureCallback]]
        The callbacks used during tuning.
    task_scheduler : Optional[TaskScheduler]
        The task scheduler to use.
    space : Optional[FnSpaceGenerator]
        The space generator to use.
    sch_rules : Optional[FnScheduleRule]
        The schedule rules to use.
    postprocs : Optional[FnPostproc]
        The postprocessors to use.
    mutator_probs : Optional[FnMutatorProb]
        The probabilities of the mutators to use.
    num_threads : Optional[int]
        The number of threads to use.

    Returns
    -------
    lib : Module
        The built runtime module for the given relay workload.
    """
    logger.info("Working directory: %s", work_dir)
    # Extract the tunable TIR tasks out of the Relay program.
    extracted_tasks = extract_task_from_relay(mod, target, params)
    # pylint: disable=protected-access
    tune_contexts = []
    target = Parse._target(target)
    database = Parse._database(database, task_name, work_dir)
    # parse the tuning contexts: one TuneContext per extracted task
    for task in extracted_tasks:
        assert len(task.dispatched) == 1, "Only size 1 dispatched task list is supported for now"
        tune_contexts.append(
            Parse._tune_context(
                tune_context=None,
                mod=Parse._mod(task.dispatched[0]),
                target=target,
                config=config,
                task_name=task.task_name,
                space_generator=space,
                sch_rules=sch_rules,
                postprocs=postprocs,
                mutator_probs=mutator_probs,
                num_threads=num_threads,
            )
        )
    # deduplication: drop tasks whose IRModule is structurally identical
    logger.info("Before task deduplication: %d tasks", len(tune_contexts))
    tasks: List[TuneContext] = []
    hashs: List[int] = []
    for i, task in enumerate(tune_contexts):
        struct_hash: int = structural_hash(task.mod)
        flag: bool = False
        # NOTE(review): on a hash match, this compares against *later* tasks
        # (tune_contexts[i + 1:]), so a task is dropped only when an identical
        # task appears after it — the last duplicate is the one kept. Confirm
        # this is the intended dedup policy.
        if struct_hash in hashs:
            for other_task in tune_contexts[i + 1 :]:
                if structural_equal(task.mod, other_task.mod):
                    flag = True
                    break
        if not flag:
            tasks.append(task)
            hashs.append(struct_hash)
    logger.info("After task deduplication: %d tasks", len(tasks))
    # parse the task scheduler
    task_scheduler = Parse._task_scheduler(
        task_scheduler,
        tasks,
        builder=Parse._builder(builder),
        runner=Parse._runner(runner),
        database=database,
        cost_model=Parse._cost_model(cost_model),
        measure_callbacks=Parse._callbacks(measure_callbacks),
    )
    # pylint: enable=protected-access
    task_scheduler.tune()
    # Build the whole Relay program, looking up the best schedule found for
    # each task in the tuning database.
    with ApplyHistoryBest(database):
        with tvm.transform.PassContext(
            opt_level=3,
            config={"relay.backend.use_meta_schedule": True},
        ):
            return relay.build(mod, target=target, params=params)
| |
import numpy as np
from collections import namedtuple
from numba import vectorize
from numba import cuda, int32, float32, float64
from numba.cuda.testing import skip_on_cudasim
from numba.cuda.testing import CUDATestCase
import unittest
# Signatures compiled for every CUDA ufunc under test: one specialization per
# supported scalar type.
sig = [int32(int32, int32),
       float32(float32, float32),
       float64(float64, float64)]

# dtypes exercised by the n-dimensional broadcast tests below.
test_dtypes = np.float32, np.int32
@skip_on_cudasim('ufunc API unsupported in the simulator')
class TestCUDAVectorize(CUDATestCase):
    """Tests for ufuncs created with ``@vectorize(..., target='cuda')``."""

    # Large enough that a launch spans many CUDA blocks.
    N = 1000001

    def test_scalar(self):
        """Calling a CUDA ufunc with plain Python scalars."""
        @vectorize(sig, target='cuda')
        def vector_add(a, b):
            return a + b

        lhs, rhs = 1.2, 2.3
        self.assertEqual(vector_add(lhs, rhs), lhs + rhs)

    def test_1d(self):
        """1-D host arrays of each supported dtype, checked against np.add."""
        @vectorize(sig, target='cuda')
        def vector_add(a, b):
            return a + b

        for dtype in (np.double, np.float32, np.int32):
            data = np.array(np.random.random(self.N), dtype=dtype)
            actual = vector_add(data, data)
            expected = np.add(data, data)
            self.assertTrue(np.allclose(expected, actual), (expected, actual))

    def test_1d_async(self):
        """Like test_1d, but with explicit device transfers on a stream."""
        @vectorize(sig, target='cuda')
        def vector_add(a, b):
            return a + b

        for dtype in (np.double, np.float32, np.int32):
            data = np.array(np.random.random(self.N), dtype=dtype)
            stream = cuda.stream()
            device_data = cuda.to_device(data, stream)
            device_result = vector_add(device_data, device_data, stream=stream)
            actual = device_result.copy_to_host()
            stream.synchronize()
            expected = np.add(data, data)
            self.assertTrue(np.allclose(expected, actual), (expected, actual))

    def test_nd(self):
        """N-dimensional inputs in both C and F order, for 1 <= ndim < 8."""
        @vectorize(sig, target='cuda')
        def vector_add(a, b):
            return a + b

        def check(dtype, order, nd, size=4):
            data = np.random.random((size,) * nd).astype(dtype)
            # Scrub NaN / infinities so allclose comparisons stay meaningful.
            data[data != data] = 2.4
            data[data == float('inf')] = 3.8
            data[data == float('-inf')] = -3.8
            data2 = np.array(data.T, order=order)  # .copy(order=order)
            expected = data + data2
            actual = vector_add(data, data2)
            self.assertTrue(np.allclose(expected, actual),
                            (dtype, order, expected, actual))

        for nd in range(1, 8):
            for dtype in test_dtypes:
                for order in ('C', 'F'):
                    check(dtype, order, nd)

    def test_ufunc_attrib(self):
        """The .reduce() attribute of a CUDA ufunc, host then device inputs."""
        for n in (8, 100, 2 ** 10 + 1):
            self.reduce_test(n)
        for n in (8, 100, 2 ** 10 + 1):
            self.reduce_test2(n)

    def test_output_arg(self):
        """Passing a preallocated output array via the `out` keyword."""
        @vectorize(sig, target='cuda')
        def vector_add(a, b):
            return a + b

        lhs = np.arange(10, dtype=np.float32)
        rhs = np.arange(10, dtype=np.float32)
        out = np.empty_like(lhs)
        vector_add(lhs, rhs, out=out)
        self.assertTrue(np.allclose(lhs + rhs, out))

    def reduce_test(self, n):
        """Helper: reduce a host array, compare with np.add.reduce."""
        @vectorize(sig, target='cuda')
        def vector_add(a, b):
            return a + b

        values = np.arange(n, dtype=np.int32)
        self.assertEqual(vector_add.reduce(values), np.add.reduce(values))

    def reduce_test2(self, n):
        """Helper: reduce a device array on an explicit stream."""
        @vectorize(sig, target='cuda')
        def vector_add(a, b):
            return a + b

        values = np.arange(n, dtype=np.int32)
        expected = np.add.reduce(values)
        stream = cuda.stream()
        device_values = cuda.to_device(values, stream)
        actual = vector_add.reduce(device_values, stream=stream)
        self.assertEqual(actual, expected)

    def test_auto_transfer(self):
        """Mixing host and device arrays auto-transfers the host operand."""
        @vectorize(sig, target='cuda')
        def vector_add(a, b):
            return a + b

        values = np.arange(10, dtype=np.int32)
        device_values = cuda.to_device(values)
        result = vector_add(values, device_values).copy_to_host()
        np.testing.assert_equal(result, values + values)

    def test_ufunc_output_ravel(self):
        """Writing into a multi-dimensional device array through `out`."""
        @vectorize(sig, target='cuda')
        def vector_add(a, b):
            return a + b

        values = np.arange(10, dtype=np.int32).reshape(2, 5)
        device_values = cuda.to_device(values)
        vector_add(device_values, device_values, out=device_values)
        np.testing.assert_equal(device_values.copy_to_host(), values + values)

    def check_tuple_arg(self, a, b):
        """Helper: call the ufunc with tuple-like arguments and compare
        against the NumPy elementwise sum of their array views."""
        @vectorize(sig, target='cuda')
        def vector_add(a, b):
            return a + b

        result = vector_add(a, b)
        np.testing.assert_equal(np.asarray(a) + np.asarray(b), result)

    def test_tuple_arg(self):
        self.check_tuple_arg((1.0, 2.0, 3.0), (4.0, 5.0, 6.0))

    def test_namedtuple_arg(self):
        Point = namedtuple('Point', ('x', 'y', 'z'))
        self.check_tuple_arg(Point(x=1.0, y=2.0, z=3.0),
                             Point(x=4.0, y=5.0, z=6.0))

    def test_tuple_of_array_arg(self):
        arr = np.arange(10, dtype=np.int32)
        self.check_tuple_arg((arr, arr + 1), (arr + 2, arr + 2))

    def test_tuple_of_namedtuple_arg(self):
        Point = namedtuple('Point', ('x', 'y', 'z'))
        a = (Point(x=1.0, y=2.0, z=3.0), Point(x=1.5, y=2.5, z=3.5))
        b = (Point(x=4.0, y=5.0, z=6.0), Point(x=4.5, y=5.5, z=6.5))
        self.check_tuple_arg(a, b)

    def test_namedtuple_of_array_arg(self):
        Points = namedtuple('Points', ('xs', 'ys'))
        xs1 = np.arange(10, dtype=np.int32)
        xs2 = np.arange(10, dtype=np.int32) * 2
        self.check_tuple_arg(Points(xs=xs1, ys=xs1 + 2),
                             Points(xs=xs2, ys=xs2 + 1))
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| |
# Copyright 2017 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gapit_test_framework import gapit_test, require, require_equal
from gapit_test_framework import require_not_equal, little_endian_bytes_to_int
from gapit_test_framework import GapitTest, get_read_offset_function
from struct_offsets import *
from vulkan_constants import *
# Field layouts used with VulkanStruct to decode structures read from the
# trace. Each entry is (field_name, struct_offsets type); fields must be
# listed in declaration order so computed offsets match the C layout.

# VkGraphicsPipelineCreateInfo.
GRAPHICS_PIPELINE_CREATE_INFO = [
    ("sType", UINT32_T),
    ("pNext", POINTER),
    ("flags", UINT32_T),
    ("stageCount", UINT32_T),
    ("pStages", POINTER),
    ("pVertexInputState", POINTER),
    ("pInputAssemblyState", POINTER),
    ("pTessellationState", POINTER),
    ("pViewportState", POINTER),
    ("pRasterizationState", POINTER),
    ("pMultisampleState", POINTER),
    ("pDepthStencilState", POINTER),
    ("pColorBlendState", POINTER),
    ("pDynamicState", POINTER),
    # NOTE(review): VkPipelineLayout is a handle in the Vulkan API but is
    # declared POINTER here — presumably POINTER and HANDLE have the same
    # width for the traced architectures; confirm against struct_offsets.
    ("layout", POINTER),
    ("renderPass", HANDLE),
    ("subpass", UINT32_T),
    ("basePipelineHandle", HANDLE),
    ("basePipelineIndex", INT32_T)
]

# VkPipelineShaderStageCreateInfo.
PIPELINE_SHADER_STAGE_CREATE_INFO = [
    ("sType", UINT32_T),
    ("pNext", POINTER),
    ("flags", UINT32_T),
    ("stage", UINT32_T),
    ("module", HANDLE),
    ("pName", POINTER),
    ("pSpecializationInfo", POINTER)
]

# VkSpecializationInfo.
SPECIALIZATION_INFO = [
    ("mapEntryCount", UINT32_T),
    ("pMapEntries", POINTER),
    ("dataSize", SIZE_T),
    ("pData", POINTER)
]

# VkPipelineVertexInputStateCreateInfo.
VERTEX_INPUT_STATE_CREATE_INFO = [
    ("sType", UINT32_T),
    ("pNext", POINTER),
    ("flags", UINT32_T),
    ("vertexBindingDescriptionCount", UINT32_T),
    ("pVertexBindingDescriptions", POINTER),
    ("vertexAttributeDescriptionCount", UINT32_T),
    ("pVertexAttributeDescriptions", POINTER)
]

# VkVertexInputBindingDescription.
VERTEX_INPUT_BINDING_DESCRIPTION = [
    ("binding", UINT32_T),
    ("stride", UINT32_T),
    ("inputRate", UINT32_T)
]

# VkVertexInputAttributeDescription.
VERTEX_INPUT_ATTRIBUTE_DESCRIPTION = [
    ("location", UINT32_T),
    ("binding", UINT32_T),
    ("format", UINT32_T),
    ("offset", UINT32_T)
]

# VkPipelineTessellationStateCreateInfo.
PIPELINE_TESSELLATION_STATE_CREATE_INFO = [
    ("sType", UINT32_T),
    ("pNext", POINTER),
    ("flags", UINT32_T),
    ("patchControlPoints", UINT32_T)
]

# VkPipelineInputAssemblyStateCreateInfo.
INPUT_ASSEMBLY_STATE = [
    ("sType", UINT32_T),
    ("pNext", POINTER),
    ("flags", UINT32_T),
    ("topology", UINT32_T),
    ("primitiveRestartEnable", BOOL32)
]

# VkPipelineViewportStateCreateInfo.
VIEWPORT_STATE_CREATE_INFO = [
    ("sType", UINT32_T),
    ("pNext", POINTER),
    ("flags", UINT32_T),
    ("viewportCount", UINT32_T),
    ("pViewports", POINTER),
    ("scissorCount", UINT32_T),
    ("pScissors", POINTER)
]

# VkViewport.
VIEWPORT = [
    ("x", FLOAT),
    ("y", FLOAT),
    ("width", FLOAT),
    ("height", FLOAT),
    ("minDepth", FLOAT),
    ("maxDepth", FLOAT)
]

# VkRect2D, flattened (offset: VkOffset2D, extent: VkExtent2D).
# NOTE(review): VkExtent2D members are uint32_t in the API; INT32_T has the
# same width, so offsets still line up — verify signedness never matters for
# the values checked here.
RECT2D = [
    ("offset_x", INT32_T),
    ("offset_y", INT32_T),
    ("extent_width", INT32_T),
    ("extent_height", INT32_T)
]

# VkPipelineRasterizationStateCreateInfo.
# NOTE: the name misspells "RASTERIZATION"; kept as-is because other modules
# may import it by this name.
PIPELINE_RASERIZATION_STATE = [
    ("sType", UINT32_T),
    ("pNext", POINTER),
    ("flags", UINT32_T),
    ("depthClampEnable", BOOL32),
    ("rasterizerDiscardEnable", BOOL32),
    ("polygonMode", UINT32_T),
    ("cullMode", UINT32_T),
    ("frontFace", UINT32_T),
    ("depthBiasEnable", BOOL32),
    ("depthBiasConstantFactor", FLOAT),
    ("depthBiasClamp", FLOAT),
    ("depthBiasSlopeFactor", FLOAT),
    ("lineWidth", FLOAT)
]

# VkPipelineMultisampleStateCreateInfo.
MULTISAMPLE_STATE = [
    ("sType", UINT32_T),
    ("pNext", POINTER),
    ("flags", UINT32_T),
    ("rasterizationSamples", UINT32_T),
    ("sampleShadingEnable", BOOL32),
    ("minSampleShading", FLOAT),
    ("pSampleMask", POINTER),
    ("alphaToCoverageEnable", BOOL32),
    ("alphaToOneEnable", BOOL32)
]

# VkPipelineDepthStencilStateCreateInfo, with the two embedded
# VkStencilOpState structs flattened as front_* / back_*.
DEPTH_STENCIL_STATE = [
    ("sType", UINT32_T),
    ("pNext", POINTER),
    ("flags", UINT32_T),
    ("depthTestEnable", BOOL32),
    ("depthWriteEnable", BOOL32),
    ("depthCompareOp", UINT32_T),
    ("depthBoundsTestEnable", BOOL32),
    ("stencilTestEnable", BOOL32),
    ("front_failOp", UINT32_T),
    ("front_passOp", UINT32_T),
    ("front_depthFailOp", UINT32_T),
    ("front_compareOp", UINT32_T),
    ("front_compareMask", UINT32_T),
    ("front_writeMask", UINT32_T),
    ("front_reference", UINT32_T),
    ("back_failOp", UINT32_T),
    ("back_passOp", UINT32_T),
    ("back_depthFailOp", UINT32_T),
    ("back_compareOp", UINT32_T),
    ("back_compareMask", UINT32_T),
    ("back_writeMask", UINT32_T),
    ("back_reference", UINT32_T),
    ("minDepthBounds", FLOAT),
    ("maxDepthBounds", FLOAT)
]

# VkPipelineColorBlendStateCreateInfo, blendConstants[4] flattened.
PIPELINE_COLOR_BLEND_STATE = [
    ("sType", UINT32_T),
    ("pNext", POINTER),
    ("flags", UINT32_T),
    ("logicOpEnable", BOOL32),
    ("logicOp", UINT32_T),
    ("attachmentCount", UINT32_T),
    ("pAttachments", POINTER),
    ("blendConstant0", FLOAT),
    ("blendConstant1", FLOAT),
    ("blendConstant2", FLOAT),
    ("blendConstant3", FLOAT)
]

# VkPipelineColorBlendAttachmentState.
PIPELINE_COLOR_BLEND_ATTACHMENT_STATE = [
    ("blendEnable", BOOL32),
    ("srcColorBlendFactor", UINT32_T),
    ("dstColorBlendFactor", UINT32_T),
    ("colorBlendOp", UINT32_T),
    ("srcAlphaBlendFactor", UINT32_T),
    ("dstAlphaBlendFactor", UINT32_T),
    ("alphaBlendOp", UINT32_T),
    ("colorWriteMask", UINT32_T)
]

# VkPipelineDynamicStateCreateInfo.
DYNAMIC_STATE = [
    ("sType", UINT32_T),
    ("pNext", POINTER),
    ("flags", UINT32_T),
    ("dynamicStateCount", UINT32_T),
    ("pDynamicStates", POINTER)
]
@gapit_test("vkCreateGraphicsPipelines_test")
class SimpleVertexFragment(GapitTest):
def expect(self):
architecture = self.architecture
create_graphics_pipelines = require(self.next_call_of(
"vkCreateGraphicsPipelines"))
require_not_equal(0, create_graphics_pipelines.int_device)
require_not_equal(0, create_graphics_pipelines.int_pipelineCache)
require_equal(1, create_graphics_pipelines.int_createInfoCount)
require_not_equal(0, create_graphics_pipelines.hex_pCreateInfos)
require_equal(0, create_graphics_pipelines.hex_pAllocator)
require_not_equal(0, create_graphics_pipelines.hex_pPipelines)
create_info = VulkanStruct(
architecture, GRAPHICS_PIPELINE_CREATE_INFO,
get_read_offset_function(create_graphics_pipelines,
create_graphics_pipelines.hex_pCreateInfos))
require_equal(VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
create_info.sType)
require_equal(0, create_info.pNext)
require_equal(0, create_info.flags)
require_equal(2, create_info.stageCount)
require_not_equal(0, create_info.pStages)
require_not_equal(0, create_info.pVertexInputState)
require_not_equal(0, create_info.pInputAssemblyState)
require_equal(0, create_info.pTessellationState)
require_not_equal(0, create_info.pViewportState)
require_not_equal(0, create_info.pRasterizationState)
require_not_equal(0, create_info.pMultisampleState)
require_not_equal(0, create_info.pDepthStencilState)
require_not_equal(0, create_info.pColorBlendState)
require_equal(0, create_info.pDynamicState)
require_not_equal(0, create_info.layout)
require_not_equal(0, create_info.renderPass)
require_equal(0, create_info.subpass)
require_equal(0, create_info.basePipelineHandle)
require_equal(0, create_info.basePipelineIndex)
shader_stage_create_info_0 = VulkanStruct(
architecture, PIPELINE_SHADER_STAGE_CREATE_INFO,
get_read_offset_function(create_graphics_pipelines,
create_info.pStages))
require_equal(VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
shader_stage_create_info_0.sType)
require_equal(0, shader_stage_create_info_0.pNext)
require_equal(0, shader_stage_create_info_0.flags)
require_equal(VK_SHADER_STAGE_VERTEX_BIT,
shader_stage_create_info_0.stage)
require_not_equal(0, shader_stage_create_info_0.module)
require_equal("main", require(
create_graphics_pipelines.get_read_string(
shader_stage_create_info_0.pName)))
require_equal(0, shader_stage_create_info_0.pSpecializationInfo)
shader_stage_create_info_1 = VulkanStruct(
architecture, PIPELINE_SHADER_STAGE_CREATE_INFO,
get_read_offset_function(create_graphics_pipelines,
create_info.pStages +
shader_stage_create_info_0.total_size))
require_equal(VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
shader_stage_create_info_1.sType)
require_equal(0, shader_stage_create_info_1.pNext)
require_equal(0, shader_stage_create_info_1.flags)
require_equal(VK_SHADER_STAGE_FRAGMENT_BIT,
shader_stage_create_info_1.stage)
require_not_equal(0, shader_stage_create_info_1.module)
require_equal("main", require(
create_graphics_pipelines.get_read_string(
shader_stage_create_info_1.pName)))
require_equal(0, shader_stage_create_info_1.pSpecializationInfo)
vertex_input_state = VulkanStruct(
architecture, VERTEX_INPUT_STATE_CREATE_INFO,
get_read_offset_function(create_graphics_pipelines,
create_info.pVertexInputState))
require_equal(VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
vertex_input_state.sType)
require_equal(0, vertex_input_state.pNext)
require_equal(0, vertex_input_state.flags)
require_equal(1, vertex_input_state.vertexBindingDescriptionCount)
require_not_equal(0, vertex_input_state.pVertexBindingDescriptions)
require_equal(2, vertex_input_state.vertexAttributeDescriptionCount)
require_not_equal(0, vertex_input_state.pVertexAttributeDescriptions)
vertex_binding_description = VulkanStruct(
architecture, VERTEX_INPUT_BINDING_DESCRIPTION,
get_read_offset_function(
create_graphics_pipelines,
vertex_input_state.pVertexBindingDescriptions))
require_equal(0, vertex_binding_description.binding)
require_equal(24, vertex_binding_description.stride)
require_equal(VK_VERTEX_INPUT_RATE_VERTEX,
vertex_binding_description.inputRate)
vertex_attribute_description_0 = VulkanStruct(
architecture, VERTEX_INPUT_ATTRIBUTE_DESCRIPTION,
get_read_offset_function(
create_graphics_pipelines,
vertex_input_state.pVertexAttributeDescriptions))
require_equal(0, vertex_attribute_description_0.location)
require_equal(0, vertex_attribute_description_0.binding)
require_equal(VK_FORMAT_R32G32B32A32_SFLOAT,
vertex_attribute_description_0.format)
require_equal(0, vertex_attribute_description_0.offset)
vertex_attribute_description_1 = VulkanStruct(
architecture, VERTEX_INPUT_ATTRIBUTE_DESCRIPTION,
get_read_offset_function(
create_graphics_pipelines,
vertex_input_state.pVertexAttributeDescriptions +
vertex_attribute_description_0.total_size))
require_equal(1, vertex_attribute_description_1.location)
require_equal(0, vertex_attribute_description_1.binding)
require_equal(VK_FORMAT_R32G32_SFLOAT,
vertex_attribute_description_1.format)
require_equal(16, vertex_attribute_description_1.offset)
input_assembly_state = VulkanStruct(
architecture, INPUT_ASSEMBLY_STATE,
get_read_offset_function(create_graphics_pipelines,
create_info.pInputAssemblyState))
require_equal(
VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
input_assembly_state.sType)
require_equal(0, input_assembly_state.pNext)
require_equal(0, input_assembly_state.flags)
require_equal(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
input_assembly_state.topology)
require_equal(0, input_assembly_state.primitiveRestartEnable)
viewport_state = VulkanStruct(
architecture, VIEWPORT_STATE_CREATE_INFO,
get_read_offset_function(create_graphics_pipelines,
create_info.pViewportState))
require_equal(
VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
viewport_state.sType
)
require_equal(0, viewport_state.pNext)
require_equal(0, viewport_state.flags)
require_equal(1, viewport_state.viewportCount)
require_not_equal(0, viewport_state.pViewports)
require_equal(1, viewport_state.scissorCount)
require_not_equal(0, viewport_state.pScissors)
viewport = VulkanStruct(
architecture, VIEWPORT,
get_read_offset_function(create_graphics_pipelines,
viewport_state.pViewports))
require_equal(0.0, viewport.x)
require_equal(0.0, viewport.y)
require_not_equal(0.0, viewport.width)
require_not_equal(0.0, viewport.height)
require_equal(0.0, viewport.minDepth)
require_equal(1.0, viewport.maxDepth)
scissor = VulkanStruct(
architecture, RECT2D,
get_read_offset_function(create_graphics_pipelines,
viewport_state.pScissors))
require_equal(0, scissor.offset_x)
require_equal(0, scissor.offset_y)
require_not_equal(0, scissor.extent_width)
require_not_equal(0, scissor.extent_height)
rasterization_state = VulkanStruct(
architecture, PIPELINE_RASERIZATION_STATE,
get_read_offset_function(create_graphics_pipelines,
create_info.pRasterizationState))
require_equal(
VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
rasterization_state.sType)
require_equal(0, rasterization_state.pNext)
require_equal(0, rasterization_state.flags)
require_equal(0, rasterization_state.depthClampEnable)
require_equal(0, rasterization_state.rasterizerDiscardEnable)
require_equal(VK_POLYGON_MODE_FILL, rasterization_state.polygonMode)
require_equal(VK_CULL_MODE_BACK_BIT, rasterization_state.cullMode)
require_equal(VK_FRONT_FACE_CLOCKWISE, rasterization_state.frontFace)
require_equal(0, rasterization_state.depthBiasEnable)
require_equal(0.0, rasterization_state.depthBiasConstantFactor)
require_equal(0.0, rasterization_state.depthBiasClamp)
require_equal(0.0, rasterization_state.depthBiasSlopeFactor)
require_equal(1.0, rasterization_state.lineWidth)
multisample_state = VulkanStruct(architecture,
MULTISAMPLE_STATE, get_read_offset_function(
create_graphics_pipelines,
create_info.pMultisampleState))
require_equal(VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
multisample_state.sType)
require_equal(0, multisample_state.pNext)
require_equal(0, multisample_state.flags)
require_equal(VK_SAMPLE_COUNT_1_BIT,
multisample_state.rasterizationSamples)
require_equal(0, multisample_state.sampleShadingEnable)
require_equal(0.0, multisample_state.minSampleShading)
require_equal(0, multisample_state.pSampleMask)
require_equal(0, multisample_state.alphaToCoverageEnable)
require_equal(0, multisample_state.alphaToOneEnable)
depth_stencil_state = VulkanStruct(architecture,
DEPTH_STENCIL_STATE, get_read_offset_function(
create_graphics_pipelines,
create_info.pDepthStencilState))
require_equal(
VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
depth_stencil_state.sType)
require_equal(0, depth_stencil_state.pNext)
require_equal(0, depth_stencil_state.flags)
require_equal(1, depth_stencil_state.depthTestEnable)
require_equal(1, depth_stencil_state.depthWriteEnable)
require_equal(VK_COMPARE_OP_LESS, depth_stencil_state.depthCompareOp)
require_equal(0, depth_stencil_state.depthBoundsTestEnable)
require_equal(0, depth_stencil_state.stencilTestEnable)
require_equal(VK_STENCIL_OP_KEEP, depth_stencil_state.front_failOp)
require_equal(VK_STENCIL_OP_KEEP, depth_stencil_state.front_passOp)
require_equal(VK_STENCIL_OP_KEEP, depth_stencil_state.front_depthFailOp)
require_equal(VK_COMPARE_OP_NEVER, depth_stencil_state.front_compareOp)
require_equal(0, depth_stencil_state.front_compareMask)
require_equal(0, depth_stencil_state.front_writeMask)
require_equal(0, depth_stencil_state.front_reference)
require_equal(VK_STENCIL_OP_KEEP, depth_stencil_state.back_failOp)
require_equal(VK_STENCIL_OP_KEEP, depth_stencil_state.back_passOp)
require_equal(VK_STENCIL_OP_KEEP, depth_stencil_state.back_depthFailOp)
require_equal(VK_COMPARE_OP_NEVER, depth_stencil_state.back_compareOp)
require_equal(0, depth_stencil_state.back_compareMask)
require_equal(0, depth_stencil_state.back_writeMask)
require_equal(0, depth_stencil_state.back_reference)
require_equal(0.0, depth_stencil_state.minDepthBounds)
require_equal(1.0, depth_stencil_state.maxDepthBounds)
color_blend_state = VulkanStruct(architecture,
PIPELINE_COLOR_BLEND_STATE, get_read_offset_function(
create_graphics_pipelines,
create_info.pColorBlendState))
require_equal(VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
color_blend_state.sType)
require_equal(0, color_blend_state.pNext)
require_equal(0, color_blend_state.flags)
require_equal(0, color_blend_state.logicOpEnable)
require_equal(VK_LOGIC_OP_CLEAR, color_blend_state.logicOp)
require_equal(1, color_blend_state.attachmentCount)
require_not_equal(0, color_blend_state.pAttachments)
require_equal(1.0, color_blend_state.blendConstant0)
require_equal(1.0, color_blend_state.blendConstant1)
require_equal(1.0, color_blend_state.blendConstant2)
require_equal(1.0, color_blend_state.blendConstant3)
color_blend_attachment_state = VulkanStruct(
architecture, PIPELINE_COLOR_BLEND_ATTACHMENT_STATE,
get_read_offset_function(
create_graphics_pipelines, color_blend_state.pAttachments))
require_equal(0, color_blend_attachment_state.blendEnable)
require_equal(VK_BLEND_FACTOR_ZERO,
color_blend_attachment_state.srcColorBlendFactor)
require_equal(VK_BLEND_FACTOR_ONE,
color_blend_attachment_state.dstColorBlendFactor)
require_equal(VK_BLEND_OP_ADD,
color_blend_attachment_state.colorBlendOp)
require_equal(VK_BLEND_FACTOR_ZERO,
color_blend_attachment_state.srcAlphaBlendFactor)
require_equal(VK_BLEND_FACTOR_ONE,
color_blend_attachment_state.dstAlphaBlendFactor)
require_equal(VK_BLEND_OP_ADD,
color_blend_attachment_state.alphaBlendOp)
require_equal(
VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT,
color_blend_attachment_state.colorWriteMask)
destroy_pipeline = require(self.next_call_of(
"vkDestroyPipeline"))
require_equal(create_graphics_pipelines.int_device,
destroy_pipeline.int_device)
require_equal(0, destroy_pipeline.hex_pAllocator)
created_pipeline = little_endian_bytes_to_int(require(
create_graphics_pipelines.get_write_data(
create_graphics_pipelines.hex_pPipelines,
NON_DISPATCHABLE_HANDLE_SIZE)))
require_equal(created_pipeline, destroy_pipeline.int_pipeline)
| |
from __future__ import unicode_literals
from abc import ABCMeta, abstractmethod
from six import with_metaclass
from pygments.token import Token
from prompt_toolkit.document import Document
from prompt_toolkit.enums import SEARCH_BUFFER
from prompt_toolkit.filters import to_cli_filter, Never
from .utils import token_list_len
# Public API of this module. NOTE: `HighlightMatchingBracketProcessor` is
# defined below but was previously missing here, so `from ... import *`
# silently omitted it — added to keep the export list complete.
__all__ = (
    'HighlightSearchProcessor',
    'HighlightSelectionProcessor',
    'HighlightMatchingBracketProcessor',
    'PasswordProcessor',
    'BracketsMismatchProcessor',
    'BeforeInput',
    'AfterInput',
    'ConditionalProcessor',
    'ShowLeadingWhiteSpaceProcessor',
    'ShowTrailingWhiteSpaceProcessor',
)
class Processor(with_metaclass(ABCMeta, object)):
    """
    Manipulate the tokenstream for a `BufferControl`.
    """
    @abstractmethod
    def run(self, cli, document, tokens):
        # Returns (new_tokens, source_to_display): the transformed token list
        # plus a function mapping an index in the input token stream to the
        # corresponding index in the output.
        return tokens, lambda i: i

    def has_focus(self, cli):
        """
        Processors can override the focus.
        (Used for the reverse-i-search prefix in DefaultPrompt.)
        """
        return False

    def invalidation_hash(self, cli, document):
        # Value that changes whenever this processor's output could change;
        # `None` means no extra invalidation information.
        return None
class HighlightSearchProcessor(Processor):
    """
    Processor that highlights search matches in the document.

    :param preview_search: A Filter; when active it indicates that we take
        the search text in real time while the user is typing, instead of the
        last active search state.
    """
    def __init__(self, preview_search=Never()):
        self.preview_search = to_cli_filter(preview_search)

    def _get_search_text(self, cli):
        """
        The text we are searching for.
        """
        # While previewing with a focused, non-empty search buffer, highlight
        # what is being typed right now.
        search_buffer = cli.buffers[SEARCH_BUFFER]
        if self.preview_search(cli) and cli.is_searching and search_buffer.text:
            return search_buffer.text
        # Otherwise fall back to the last active search.
        return cli.search_state.text

    def run(self, cli, document, tokens):
        needle = self._get_search_text(cli)
        ignore_case = cli.is_ignoring_case

        if needle and not cli.is_returning:
            # Recolor every occurrence; the match under the cursor gets its
            # own token type.
            for start in document.find_all(needle, ignore_case=ignore_case):
                if start == document.cursor_position:
                    token = Token.SearchMatch.Current
                else:
                    token = Token.SearchMatch
                for pos in range(start, start + len(needle)):
                    tokens[pos] = (token, tokens[pos][1])

        return tokens, lambda i: i

    def invalidation_hash(self, cli, document):
        needle = self._get_search_text(cli)

        # Highlighting depends on the search text, on whether the input is
        # being accepted, and — because the current match is colored
        # differently — on the cursor position while a search is active.
        return (
            needle,
            cli.is_returning,
            (needle and document.cursor_position),
        )
class HighlightSelectionProcessor(Processor):
    """
    Processor that highlights the selection in the document.
    """
    def run(self, cli, document, tokens):
        selection = document.selection_range()

        if selection:
            # Recolor every character inside the selected span.
            start, stop = selection
            for pos in range(start, stop):
                tokens[pos] = (Token.SelectedText, tokens[pos][1])

        return tokens, lambda i: i

    def invalidation_hash(self, cli, document):
        # Rendering changes whenever the selection boundaries move.
        return (document.selection_range(), )
class PasswordProcessor(Processor):
    """
    Processor that masks the input by rendering one `char` for every typed
    character. (For passwords.)
    """
    def __init__(self, char='*'):
        self.char = char

    def run(self, cli, document, tokens):
        # Keep each fragment's token type but replace its text with an
        # equally long run of the mask character; positions map one-to-one.
        masked = [(token, self.char * len(text)) for token, text in tokens]
        return masked, lambda i: i
class HighlightMatchingBracketProcessor(Processor):
    """
    When the cursor is on or right after a bracket, it highlights the matching
    bracket.
    """
    # Brackets that trigger the "right after" case (cursor just past them).
    _closing_braces = '])}>'

    def __init__(self, chars='[](){}<>'):
        self.chars = chars

    def run(self, cli, document, tokens):
        def replace_token(pos):
            """ Replace token in list of tokens. """
            tokens[pos] = (Token.MatchingBracket, tokens[pos][1])

        def apply_for_document(document):
            """ Find and replace matching tokens. """
            if document.current_char in self.chars:
                pos = document.matching_bracket_position
                if pos:
                    replace_token(document.cursor_position)
                    replace_token(document.cursor_position + pos)
                    return True

        # Apply for character below cursor.
        applied = apply_for_document(document)

        # Otherwise, apply for character before cursor.
        d = document
        if not applied and d.cursor_position > 0 and d.char_before_cursor in self._closing_braces:
            apply_for_document(Document(d.text, d.cursor_position - 1))

        return tokens, lambda i: i

    def invalidation_hash(self, cli, document):
        on_brace = document.current_char in self.chars
        after_brace = document.char_before_cursor in self.chars

        if on_brace:
            return (True, document.cursor_position)
        elif after_brace and document.char_before_cursor in self._closing_braces:
            return (True, document.cursor_position - 1)
        else:
            # Don't include the cursor position in the hash if we are not *on*
            # a brace. We don't have to rerender the output, because it will be
            # the same anyway.
            return False
class BracketsMismatchProcessor(Processor):
    """
    Processor that replaces the token type of bracket mismatches by an Error.
    """
    error_token = Token.Error

    def run(self, cli, document, tokens):
        open_positions = []  # Indexes (into `tokens`) of unmatched openers.

        for pos, (token, text) in enumerate(tokens):
            if text not in '({[]})':
                continue

            if text in '({[':
                # Remember where this opener is; it awaits a closer.
                open_positions.append(pos)
                continue

            # `text` is a closing bracket: check it against the most recent
            # opener (empty string when the stack is empty).
            last_opener = tokens[open_positions[-1]][1] if open_positions else ''
            if last_opener + text in ('()', '{}', '[]'):
                open_positions.pop()
            else:
                # No match for closing bracket.
                tokens[pos] = (self.error_token, text)

        # Highlight unclosed openers that are still on the stack.
        for pos in open_positions:
            tokens[pos] = (Token.Error, tokens[pos][1])

        return tokens, lambda i: i
class BeforeInput(Processor):
    """
    Insert tokens before the input.
    """
    def __init__(self, get_tokens):
        assert callable(get_tokens)
        self.get_tokens = get_tokens

    def run(self, cli, document, tokens):
        # Prepend the extra tokens. Every original cursor position then
        # shifts right by the total length of the inserted text.
        prefix = self.get_tokens(cli)
        offset = token_list_len(prefix)

        def shift(i):
            return i + offset

        return prefix + tokens, shift

    @classmethod
    def static(cls, text, token=Token):
        """ Build an instance that always inserts the same text. """
        def get_static_tokens(cli):
            return [(token, text)]
        return cls(get_static_tokens)

    def __repr__(self):
        return '%s(get_tokens=%r)' % (
            self.__class__.__name__, self.get_tokens)

    def invalidation_hash(self, cli, document):
        # Redraw when the given tokens change.
        return tuple(self.get_tokens(cli))
class AfterInput(Processor):
    """
    Insert tokens after the input.
    """
    def __init__(self, get_tokens):
        assert callable(get_tokens)
        self.get_tokens = get_tokens

    def run(self, cli, document, tokens):
        # Appending after the input moves nothing, so the position
        # translation is the identity.
        suffix = self.get_tokens(cli)
        return tokens + suffix, lambda i: i

    @classmethod
    def static(cls, text, token=Token):
        """ Build an instance that always appends the same text. """
        def get_static_tokens(cli):
            return [(token, text)]
        return cls(get_static_tokens)

    def __repr__(self):
        return '%s(get_tokens=%r)' % (
            self.__class__.__name__, self.get_tokens)

    def invalidation_hash(self, cli, document):
        # Redraw when the given tokens change.
        return tuple(self.get_tokens(cli))
class ShowLeadingWhiteSpaceProcessor(Processor):
    """
    Make leading whitespace visible by substituting a marker character
    for each space at the start of a line.
    """
    def __init__(self, token=Token.LeadingWhiteSpace, char='\xb7'):
        self.token = token
        self.char = char

    def run(self, cli, document, tokens):
        replacement = (self.token, self.char)
        at_line_start = True

        for index, item in enumerate(tokens):
            text = item[1]
            if text == '\n':
                # The next character starts a fresh line.
                at_line_start = True
            elif at_line_start and text == ' ':
                tokens[index] = replacement
            else:
                at_line_start = False
        return tokens, lambda i: i
class ShowTrailingWhiteSpaceProcessor(Processor):
    """
    Make trailing whitespace visible by substituting a marker character
    for each space at the end of a line.
    """
    def __init__(self, token=Token.TrailingWhiteSpace, char='\xb7'):
        self.token = token
        self.char = char

    def run(self, cli, document, tokens):
        # Scan from the end towards the front, so "trailing" spaces are
        # the ones seen before any non-space on the current line.
        replacement = (self.token, self.char)
        at_line_end = True

        for index in reversed(range(len(tokens))):
            text = tokens[index][1]
            if text == '\n':
                at_line_end = True
            elif at_line_end and text == ' ':
                tokens[index] = replacement
            else:
                at_line_end = False
        return tokens, lambda i: i
class ConditionalProcessor(Processor):
    """
    Apply another processor only while a given filter evaluates to True.

    Example:

        # Create a function that returns whether or not the processor should
        # currently be applied.
        def highlight_enabled(cli):
            return true_or_false

        # Wrap it in a `ConditionalProcessor` for usage in a `BufferControl`.
        BufferControl(input_processors=[
            ConditionalProcessor(HighlightSearchProcessor(),
                                 Condition(highlight_enabled))])
    """
    def __init__(self, processor, filter):
        assert isinstance(processor, Processor)

        self.processor = processor
        self.filter = to_cli_filter(filter)

    def run(self, cli, document, tokens):
        # When disabled, pass the tokens through untouched.
        if not self.filter(cli):
            return tokens, lambda i: i
        return self.processor.run(cli, document, tokens)

    def has_focus(self, cli):
        # A disabled processor never claims the focus.
        if not self.filter(cli):
            return False
        return self.processor.has_focus(cli)

    def invalidation_hash(self, cli, document):
        # When enabled, wrap the inner processor's hash; otherwise a
        # constant False, since the output cannot change while disabled.
        if not self.filter(cli):
            return False
        return (True, self.processor.invalidation_hash(cli, document))

    def __repr__(self):
        return '%s(processor=%r, filter=%r)' % (
            self.__class__.__name__, self.processor, self.filter)
| |
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import ddt
import mock
from rally.task.processing import plot
from tests.unit import test
PLOT = "rally.task.processing.plot."
@ddt.ddt
class PlotTestCase(test.TestCase):
    """Unit tests for the module-level helpers of rally.task.processing.plot."""

    @mock.patch(PLOT + "charts")
    def test__process_scenario(self, mock_charts):
        """_process_scenario renders all charts and builds the scenario dict."""
        # Make every chart class render a recognizable sentinel string.
        for mock_ins, ret in [
                (mock_charts.MainStatsTable, "main_stats"),
                (mock_charts.MainStackedAreaChart, "main_stacked"),
                (mock_charts.AtomicStackedAreaChart, "atomic_stacked"),
                (mock_charts.OutputStackedAreaDeprecatedChart,
                 "output_stacked"),
                (mock_charts.LoadProfileChart, "load_profile"),
                (mock_charts.MainHistogramChart, "main_histogram"),
                (mock_charts.AtomicHistogramChart, "atomic_histogram"),
                (mock_charts.AtomicAvgChart, "atomic_avg")]:
            setattr(mock_ins.return_value.render, "return_value", ret)
        iterations = [
            {"timestamp": i + 2, "error": [],
             "duration": i + 5, "idle_duration": i,
             "output": {"additive": [], "complete": []},
             "atomic_actions": {"foo_action": i + 10}} for i in range(10)]
        data = {"iterations": iterations, "sla": [],
                "key": {"kw": {"runner": {"type": "constant"}},
                        "name": "Foo.bar", "pos": 0},
                "info": {"atomic": {"foo_action": {"max_duration": 19,
                                                   "min_duration": 10}},
                         "full_duration": 40, "load_duration": 32,
                         "iterations_count": 10, "iterations_passed": 10,
                         "max_duration": 14, "min_duration": 5,
                         "output_names": [],
                         "tstamp_end": 25, "tstamp_start": 2}}
        task_data = plot._process_scenario(data, 1)
        self.assertEqual(
            task_data, {
                "cls": "Foo", "met": "bar", "name": "bar [2]", "pos": "1",
                "runner": "constant", "config": json.dumps(
                    {"Foo.bar": [{"runner": {"type": "constant"}}]},
                    indent=2),
                "full_duration": 40, "load_duration": 32,
                "atomic": {"histogram": "atomic_histogram",
                           "iter": "atomic_stacked", "pie": "atomic_avg"},
                "iterations": {"histogram": "main_histogram",
                               "iter": "main_stacked",
                               "pie": [("success", 10), ("errors", 0)]},
                "iterations_count": 10, "errors": [],
                "load_profile": "load_profile",
                "additive_output": [],
                "complete_output": [[], [], [], [], [], [], [], [], [], []],
                "output_errors": [],
                "sla": [], "sla_success": True, "table": "main_stats"})

    @mock.patch(PLOT + "_process_scenario")
    @mock.patch(PLOT + "json.dumps", return_value="json_data")
    def test__process_tasks(self, mock_json_dumps, mock__process_scenario):
        """_process_tasks groups scenario kwargs by name and processes each."""
        # Note the duplicated "b" entry: its kwargs must appear twice.
        tasks_results = [{"key": {"name": i, "kw": "kw_" + i}}
                         for i in ("a", "b", "c", "b")]
        mock__process_scenario.side_effect = lambda a, b: (
            {"cls": "%s_cls" % a["key"]["name"],
             "name": str(b),
             "met": "dummy",
             "pos": str(b)})
        source, tasks = plot._process_tasks(tasks_results)
        self.assertEqual(source, "json_data")
        mock_json_dumps.assert_called_once_with(
            {"a": ["kw_a"], "b": ["kw_b", "kw_b"], "c": ["kw_c"]},
            sort_keys=True, indent=2)
        self.assertEqual(
            tasks,
            [{"cls": "a_cls", "met": "dummy", "name": "0", "pos": "0"},
             {"cls": "b_cls", "met": "dummy", "name": "0", "pos": "0"},
             {"cls": "b_cls", "met": "dummy", "name": "1", "pos": "1"},
             {"cls": "c_cls", "met": "dummy", "name": "0", "pos": "0"}])

    @ddt.data({},
              {"include_libs": True},
              {"include_libs": False})
    @ddt.unpack
    @mock.patch(PLOT + "_process_tasks")
    @mock.patch(PLOT + "_extend_results")
    @mock.patch(PLOT + "ui_utils.get_template")
    @mock.patch(PLOT + "json.dumps", side_effect=lambda s: "json_" + s)
    @mock.patch("rally.common.version.version_string", return_value="42.0")
    def test_plot(self, mock_version_string, mock_dumps, mock_get_template,
                  mock__extend_results, mock__process_tasks, **ddt_kwargs):
        """plot.plot renders the report template; include_libs defaults False."""
        mock__process_tasks.return_value = "source", "scenarios"
        mock_get_template.return_value.render.return_value = "tasks_html"
        mock__extend_results.return_value = ["extended_result"]
        html = plot.plot("tasks_results", **ddt_kwargs)
        self.assertEqual(html, "tasks_html")
        mock__extend_results.assert_called_once_with("tasks_results")
        mock_get_template.assert_called_once_with("task/report.html")
        mock__process_tasks.assert_called_once_with(["extended_result"])
        if "include_libs" in ddt_kwargs:
            mock_get_template.return_value.render.assert_called_once_with(
                version="42.0", data="json_scenarios", source="json_source",
                include_libs=ddt_kwargs["include_libs"])
        else:
            mock_get_template.return_value.render.assert_called_once_with(
                version="42.0", data="json_scenarios", source="json_source",
                include_libs=False)

    @mock.patch(PLOT + "objects.Task.extend_results")
    def test__extend_results(self, mock_task_extend_results):
        """_extend_results wraps each raw result and extends it via Task."""
        mock_task_extend_results.side_effect = iter(
            [["extended_foo"], ["extended_bar"], ["extended_spam"]])
        tasks_results = [
            {"key": "%s_key" % k, "sla": "%s_sla" % k,
             "full_duration": "%s_full_duration" % k,
             "load_duration": "%s_load_duration" % k,
             "result": "%s_result" % k} for k in ("foo", "bar", "spam")]
        # The generic form each result is converted to before extension.
        generic_results = [
            {"id": None, "created_at": None, "updated_at": None,
             "task_uuid": None, "key": "%s_key" % k,
             "data": {"raw": "%s_result" % k,
                      "full_duration": "%s_full_duration" % k,
                      "load_duration": "%s_load_duration" % k,
                      "sla": "%s_sla" % k}} for k in ("foo", "bar", "spam")]
        results = plot._extend_results(tasks_results)
        self.assertEqual([mock.call([r]) for r in generic_results],
                         mock_task_extend_results.mock_calls)
        self.assertEqual(["extended_foo", "extended_bar", "extended_spam"],
                         results)

    def test__extend_results_empty(self):
        """An empty input list yields an empty output list."""
        self.assertEqual([], plot._extend_results([]))

    @mock.patch(PLOT + "Trends")
    @mock.patch(PLOT + "ui_utils.get_template")
    @mock.patch(PLOT + "_extend_results")
    @mock.patch("rally.common.version.version_string", return_value="42.0")
    def test_trends(self, mock_version_string, mock__extend_results,
                    mock_get_template, mock_trends):
        """plot.trends feeds every extended result into Trends and renders."""
        mock__extend_results.return_value = ["foo", "bar"]
        trends = mock.Mock()
        trends.get_data.return_value = ["foo", "bar"]
        mock_trends.return_value = trends
        template = mock.Mock()
        template.render.return_value = "trends html"
        mock_get_template.return_value = template
        self.assertEqual("trends html", plot.trends("tasks_results"))
        self.assertEqual([mock.call("foo"), mock.call("bar")],
                         trends.add_result.mock_calls)
        mock_get_template.assert_called_once_with("task/trends.html")
        template.render.assert_called_once_with(version="42.0",
                                                data="[\"foo\", \"bar\"]")
@ddt.ddt
class TrendsTestCase(test.TestCase):
    """Unit tests for plot.Trends (cross-task trends aggregation)."""

    def test___init__(self):
        """A fresh Trends has no tasks; the constructor takes no arguments."""
        trends = plot.Trends()
        self.assertEqual({}, trends._tasks)
        self.assertRaises(TypeError, plot.Trends, 42)

    @ddt.data({"args": [None], "result": "None"},
              {"args": [""], "result": ""},
              {"args": [" str value "], "result": "str value"},
              {"args": [" 42 "], "result": "42"},
              {"args": ["42"], "result": "42"},
              {"args": [42], "result": "42"},
              {"args": [42.00], "result": "42.0"},
              {"args": [[3.2, 1, " foo ", None]], "result": "1,3.2,None,foo"},
              {"args": [(" def", "abc", [22, 33])], "result": "22,33,abc,def"},
              {"args": [{}], "result": ""},
              {"args": [{1: 2, "a": " b c "}], "result": "1:2|a:b c"},
              {"args": [{"foo": "bar", (1, 2): [5, 4, 3]}],
               "result": "1,2:3,4,5|foo:bar"},
              {"args": [1, 2], "raises": TypeError},
              {"args": [set()], "raises": TypeError})
    @ddt.unpack
    def test__to_str(self, args, result=None, raises=None):
        """_to_str canonicalizes scalars/sequences/dicts; rejects others."""
        trends = plot.Trends()
        if raises:
            self.assertRaises(raises, trends._to_str, *args)
        else:
            self.assertEqual(result, trends._to_str(*args))

    @mock.patch(PLOT + "hashlib")
    def test__make_hash(self, mock_hashlib):
        """_make_hash md5-hashes the utf8-encoded canonical string."""
        mock_hashlib.md5.return_value.hexdigest.return_value = "md5_digest"
        trends = plot.Trends()
        trends._to_str = mock.Mock()
        trends._to_str.return_value.encode.return_value = "foo_str"
        self.assertEqual("md5_digest", trends._make_hash("foo_obj"))
        trends._to_str.assert_called_once_with("foo_obj")
        trends._to_str.return_value.encode.assert_called_once_with("utf8")
        mock_hashlib.md5.assert_called_once_with("foo_str")

    def _make_result(self, salt, sla_success=True):
        # Fabricate one task result; `salt` differentiates key/kw/name.
        return {
            "key": {"kw": salt + "_kw", "name": "Scenario.name_%s" % salt},
            "sla": [{"success": sla_success}],
            "info": {"iterations_count": 4,
                     "atomic": {"a": 123, "b": 456},
                     "stat": {"rows": [["a", 0.7, 0.85, 0.9, 0.87,
                                        1.25, 0.67, "100.0%", 4],
                                       ["b", 0.5, 0.75, 0.85, 0.9,
                                        1.1, 0.58, "100.0%", 4],
                                       ["total", 1.2, 1.55, 1.7, 1.9,
                                        1.5, 1.6, "100.0%", 4]],
                              "cols": ["Action", "Min (sec)", "Median (sec)",
                                       "90%ile (sec)", "95%ile (sec)",
                                       "Max (sec)", "Avg (sec)", "Success",
                                       "Count"]}},
            "iterations": ["<iter-0>", "<iter-1>", "<iter-2>", "<iter-3>"]}

    def _sort_trends(self, trends_result):
        # Sort the value series in place for order-insensitive comparison.
        for r_idx, res in enumerate(trends_result):
            trends_result[r_idx]["total"]["values"].sort()
            for a_idx, dummy in enumerate(res["atomic"]):
                trends_result[r_idx]["atomic"][a_idx]["values"].sort()
        return trends_result

    def test_add_result_and_get_data(self):
        """Two distinct scenarios produce two independent trend entries."""
        trends = plot.Trends()
        for i in 0, 1:
            trends.add_result(self._make_result(str(i)))
        expected = [
            {"atomic": [
                {"name": "a",
                 "success": [("success", [(1, 100.0)])],
                 "values": [("90%ile", [(1, 0.9)]), ("95%ile", [(1, 0.87)]),
                            ("avg", [(1, 0.67)]), ("max", [(1, 1.25)]),
                            ("median", [(1, 0.85)]), ("min", [(1, 0.7)])]},
                {"name": "b",
                 "success": [("success", [(1, 100.0)])],
                 "values": [("90%ile", [(1, 0.85)]), ("95%ile", [(1, 0.9)]),
                            ("avg", [(1, 0.58)]), ("max", [(1, 1.1)]),
                            ("median", [(1, 0.75)]), ("min", [(1, 0.5)])]}],
             "cls": "Scenario", "config": "\"0_kw\"", "met": "name_0",
             "name": "Scenario.name_0", "seq": 1, "single": True,
             "sla_failures": 0, "stat": {"avg": 1.6, "max": 1.5, "min": 1.2},
             "total": {"success": [("success", [(1, 100.0)])],
                       "values": [("90%ile", [(1, 1.7)]),
                                  ("95%ile", [(1, 1.9)]),
                                  ("avg", [(1, 1.6)]),
                                  ("max", [(1, 1.5)]),
                                  ("median", [(1, 1.55)]),
                                  ("min", [(1, 1.2)])]}},
            {"atomic": [
                {"name": "a",
                 "success": [("success", [(1, 100.0)])],
                 "values": [("90%ile", [(1, 0.9)]), ("95%ile", [(1, 0.87)]),
                            ("avg", [(1, 0.67)]), ("max", [(1, 1.25)]),
                            ("median", [(1, 0.85)]), ("min", [(1, 0.7)])]},
                {"name": "b",
                 "success": [("success", [(1, 100.0)])],
                 "values": [("90%ile", [(1, 0.85)]), ("95%ile", [(1, 0.9)]),
                            ("avg", [(1, 0.58)]), ("max", [(1, 1.1)]),
                            ("median", [(1, 0.75)]), ("min", [(1, 0.5)])]}],
             "cls": "Scenario", "config": "\"1_kw\"", "met": "name_1",
             "name": "Scenario.name_1", "seq": 1, "single": True,
             "sla_failures": 0, "stat": {"avg": 1.6, "max": 1.5, "min": 1.2},
             "total": {"success": [("success", [(1, 100.0)])],
                       "values": [("90%ile", [(1, 1.7)]),
                                  ("95%ile", [(1, 1.9)]),
                                  ("avg", [(1, 1.6)]),
                                  ("max", [(1, 1.5)]),
                                  ("median", [(1, 1.55)]),
                                  ("min", [(1, 1.2)])]}}]
        self.assertEqual(expected, self._sort_trends(trends.get_data()))

    def test_add_result_once_and_get_data(self):
        """A single result with a failed SLA is counted in sla_failures."""
        trends = plot.Trends()
        trends.add_result(self._make_result("foo", sla_success=False))
        expected = [
            {"atomic": [
                {"name": "a",
                 "success": [("success", [(1, 100.0)])],
                 "values": [("90%ile", [(1, 0.9)]), ("95%ile", [(1, 0.87)]),
                            ("avg", [(1, 0.67)]), ("max", [(1, 1.25)]),
                            ("median", [(1, 0.85)]), ("min", [(1, 0.7)])]},
                {"name": "b",
                 "success": [("success", [(1, 100.0)])],
                 "values": [("90%ile", [(1, 0.85)]), ("95%ile", [(1, 0.9)]),
                            ("avg", [(1, 0.58)]), ("max", [(1, 1.1)]),
                            ("median", [(1, 0.75)]), ("min", [(1, 0.5)])]}],
             "cls": "Scenario", "config": "\"foo_kw\"", "met": "name_foo",
             "name": "Scenario.name_foo", "seq": 1, "single": True,
             "sla_failures": 1, "stat": {"avg": 1.6, "max": 1.5, "min": 1.2},
             "total": {"success": [("success", [(1, 100.0)])],
                       "values": [("90%ile", [(1, 1.7)]),
                                  ("95%ile", [(1, 1.9)]),
                                  ("avg", [(1, 1.6)]),
                                  ("max", [(1, 1.5)]),
                                  ("median", [(1, 1.55)]),
                                  ("min", [(1, 1.2)])]}}]
        self.assertEqual(expected, self._sort_trends(trends.get_data()))

    def test_get_data_no_results_added(self):
        """get_data on a Trends with no results returns an empty list."""
        trends = plot.Trends()
        self.assertEqual([], trends.get_data())
| |
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Invocation-side implementation of gRPC Python."""
import copy
import functools
import logging
import os
import sys
import threading
import time
import grpc
import grpc.experimental
from grpc import _compression
from grpc import _common
from grpc import _grpcio_metadata
from grpc._cython import cygrpc
# Module-level logger; all diagnostics in this module go through it.
_LOGGER = logging.getLogger(__name__)

_USER_AGENT = 'grpc-python/{}'.format(_grpcio_metadata.__version__)

_EMPTY_FLAGS = 0

# NOTE(rbellevi): No guarantees are given about the maintenance of this
# environment variable.
_DEFAULT_SINGLE_THREADED_UNARY_STREAM = os.getenv(
    "GRPC_SINGLE_THREADED_UNARY_STREAM") is not None

# For each RPC arity, the set of cygrpc operation types initially "due" from
# the completion queue over the lifetime of the call.
_UNARY_UNARY_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.send_message,
    cygrpc.OperationType.send_close_from_client,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_message,
    cygrpc.OperationType.receive_status_on_client,
)
_UNARY_STREAM_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.send_message,
    cygrpc.OperationType.send_close_from_client,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_status_on_client,
)
_STREAM_UNARY_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_message,
    cygrpc.OperationType.receive_status_on_client,
)
_STREAM_STREAM_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_status_on_client,
)

_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = (
    'Exception calling channel subscription callback!')

# repr() templates for terminated RPC objects; the non-OK variant also
# includes the debug error string.
_OK_RENDEZVOUS_REPR_FORMAT = ('<{} of RPC that terminated with:\n'
                              '\tstatus = {}\n'
                              '\tdetails = "{}"\n'
                              '>')

_NON_OK_RENDEZVOUS_REPR_FORMAT = ('<{} of RPC that terminated with:\n'
                                  '\tstatus = {}\n'
                                  '\tdetails = "{}"\n'
                                  '\tdebug_error_string = "{}"\n'
                                  '>')
def _deadline(timeout):
return None if timeout is None else time.time() + timeout
def _unknown_code_details(unknown_cygrpc_code, details):
return 'Server sent unknown code {} and details "{}"'.format(
unknown_cygrpc_code, details)
class _RPCState(object):
    """Mutable state shared between the API-facing call object and the
    channel machinery for a single RPC. All fields are guarded by
    `condition`."""

    def __init__(self, due, initial_metadata, trailing_metadata, code, details):
        # Guards every field below and signals state transitions to waiters.
        self.condition = threading.Condition()
        # The cygrpc.OperationType objects representing events due from the RPC's
        # completion queue.
        self.due = set(due)
        self.initial_metadata = initial_metadata
        # Most recently received (deserialized) response message, if any.
        self.response = None
        self.trailing_metadata = trailing_metadata
        # Terminal status code; None while the RPC is still in progress.
        self.code = code
        self.details = details
        self.debug_error_string = None
        # The semantics of grpc.Future.cancel and grpc.Future.cancelled are
        # slightly wonky, so they have to be tracked separately from the rest of the
        # result of the RPC. This field tracks whether cancellation was requested
        # prior to termination of the RPC.
        self.cancelled = False
        self.callbacks = []
        # Fork epoch at creation time; compared against the current epoch to
        # detect that a fork happened while this RPC was in flight.
        self.fork_epoch = cygrpc.get_fork_epoch()

    def reset_postfork_child(self):
        # Recreate the Condition in the forked child: the parent's lock may
        # have been held at fork time and would then never be released.
        self.condition = threading.Condition()
def _abort(state, code, details):
if state.code is None:
state.code = code
state.details = details
if state.initial_metadata is None:
state.initial_metadata = ()
state.trailing_metadata = ()
def _handle_event(event, state, response_deserializer):
    """Fold one completion-queue event into `state`.

    Caller must hold `state.condition`. Returns the list of user-registered
    callbacks to invoke after the lock is released; it is non-empty only
    when this event carried the RPC's terminal status.
    """
    callbacks = []
    for batch_operation in event.batch_operations:
        operation_type = batch_operation.type()
        state.due.remove(operation_type)
        if operation_type == cygrpc.OperationType.receive_initial_metadata:
            state.initial_metadata = batch_operation.initial_metadata()
        elif operation_type == cygrpc.OperationType.receive_message:
            serialized_response = batch_operation.message()
            if serialized_response is not None:
                response = _common.deserialize(serialized_response,
                                               response_deserializer)
                if response is None:
                    details = 'Exception deserializing response!'
                    _abort(state, grpc.StatusCode.INTERNAL, details)
                else:
                    state.response = response
        elif operation_type == cygrpc.OperationType.receive_status_on_client:
            state.trailing_metadata = batch_operation.trailing_metadata()
            if state.code is None:
                code = _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE.get(
                    batch_operation.code())
                if code is None:
                    state.code = grpc.StatusCode.UNKNOWN
                    # Bug fix: report the raw cygrpc code the server actually
                    # sent. Previously `code` was passed here, which is
                    # provably None on this branch, so the details always
                    # read 'Server sent unknown code None ...'.
                    state.details = _unknown_code_details(
                        batch_operation.code(), batch_operation.details())
                else:
                    state.code = code
                    state.details = batch_operation.details()
                state.debug_error_string = batch_operation.error_string()
            # Terminal event: hand callbacks back to the caller and make
            # further callback registration impossible.
            callbacks.extend(state.callbacks)
            state.callbacks = None
    return callbacks
def _event_handler(state, response_deserializer):
    """Create the channel-spin-thread event handler for one RPC.

    The returned callable folds the event into `state`, wakes any waiters,
    runs user callbacks outside the lock, and returns True when the RPC is
    fully settled (so the spin thread can stop tracking it).
    """

    def handle_event(event):
        with state.condition:
            callbacks = _handle_event(event, state, response_deserializer)
            state.condition.notify_all()
            done = not state.due
        for callback in callbacks:
            try:
                callback()
            except Exception as e:  # pylint: disable=broad-except
                # NOTE(rbellevi): We suppress but log errors here so as not to
                # kill the channel spin thread.
                # Consistency fix: use the module-level _LOGGER (as the rest
                # of this module does) instead of the root `logging` module.
                # NOTE(review): `callback.func` assumes the callback is a
                # functools.partial; a bare callable would raise
                # AttributeError here — confirm against callers.
                _LOGGER.error('Exception in callback %s: %s',
                              repr(callback.func), repr(e))
        # If a fork happened while this event was being processed, report
        # not-done so the child does not consider the RPC settled.
        return done and state.fork_epoch >= cygrpc.get_fork_epoch()

    return handle_event
#pylint: disable=too-many-statements
def _consume_request_iterator(request_iterator, state, call, request_serializer,
                              event_handler):
    """Consume a request iterator supplied by the user.

    Spawns a daemon thread that pulls requests from `request_iterator`,
    serializes them, and sends them on `call`, sending a close-from-client
    when the iterator is exhausted. Aborts the RPC on iteration or
    serialization failure.
    """

    def consume_request_iterator():  # pylint: disable=too-many-branches
        # Iterate over the request iterator until it is exhausted or an error
        # condition is encountered.
        while True:
            return_from_user_request_generator_invoked = False
            try:
                # The thread may die in user-code. Do not block fork for this.
                cygrpc.enter_user_request_generator()
                request = next(request_iterator)
            except StopIteration:
                break
            except Exception:  # pylint: disable=broad-except
                cygrpc.return_from_user_request_generator()
                return_from_user_request_generator_invoked = True
                code = grpc.StatusCode.UNKNOWN
                details = 'Exception iterating requests!'
                _LOGGER.exception(details)
                call.cancel(_common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code],
                            details)
                _abort(state, code, details)
                return
            finally:
                # Guard against calling return_from_user_request_generator
                # twice on the error path above.
                if not return_from_user_request_generator_invoked:
                    cygrpc.return_from_user_request_generator()
            serialized_request = _common.serialize(request, request_serializer)
            with state.condition:
                # Stop sending as soon as the RPC has terminated or was
                # cancelled.
                if state.code is None and not state.cancelled:
                    if serialized_request is None:
                        code = grpc.StatusCode.INTERNAL
                        details = 'Exception serializing request!'
                        call.cancel(
                            _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code],
                            details)
                        _abort(state, code, details)
                        return
                    else:
                        operations = (cygrpc.SendMessageOperation(
                            serialized_request, _EMPTY_FLAGS),)
                        operating = call.operate(operations, event_handler)
                        if operating:
                            state.due.add(cygrpc.OperationType.send_message)
                        else:
                            return

                        def _done():
                            # The send completed (or the RPC terminated).
                            return (state.code is not None or
                                    cygrpc.OperationType.send_message not in
                                    state.due)

                        # Wait for the in-flight send before pulling the next
                        # request; spin callback keeps fork handling safe.
                        _common.wait(state.condition.wait,
                                     _done,
                                     spin_cb=functools.partial(
                                         cygrpc.block_if_fork_in_progress,
                                         state))
                        if state.code is not None:
                            return
                else:
                    return
        # Iterator exhausted normally: half-close the sending side.
        with state.condition:
            if state.code is None:
                operations = (
                    cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),)
                operating = call.operate(operations, event_handler)
                if operating:
                    state.due.add(cygrpc.OperationType.send_close_from_client)

    consumption_thread = cygrpc.ForkManagedThread(
        target=consume_request_iterator)
    consumption_thread.setDaemon(True)
    consumption_thread.start()
def _rpc_state_string(class_name, rpc_state):
    """Render a human-readable description of an RPC's current state."""
    with rpc_state.condition:
        # Still in progress: nothing beyond the class name to report.
        if rpc_state.code is None:
            return '<{} object>'.format(class_name)
        # Successful termination omits the debug error string.
        if rpc_state.code is grpc.StatusCode.OK:
            return _OK_RENDEZVOUS_REPR_FORMAT.format(class_name,
                                                     rpc_state.code,
                                                     rpc_state.details)
        return _NON_OK_RENDEZVOUS_REPR_FORMAT.format(
            class_name, rpc_state.code, rpc_state.details,
            rpc_state.debug_error_string)
class _InactiveRpcError(grpc.RpcError, grpc.Call, grpc.Future):
    """An RPC error not tied to the execution of a particular RPC.

    The RPC represented by the state object must not be in-progress or
    cancelled.

    Attributes:
      _state: An instance of _RPCState.
    """

    def __init__(self, state):
        # Snapshot the terminal state under its lock so this error object
        # is effectively immutable and safe to share between threads.
        with state.condition:
            self._state = _RPCState((), copy.deepcopy(state.initial_metadata),
                                    copy.deepcopy(state.trailing_metadata),
                                    state.code, copy.deepcopy(state.details))
            self._state.response = copy.copy(state.response)
            self._state.debug_error_string = copy.copy(state.debug_error_string)

    def initial_metadata(self):
        return self._state.initial_metadata

    def trailing_metadata(self):
        return self._state.trailing_metadata

    def code(self):
        return self._state.code

    def details(self):
        return _common.decode(self._state.details)

    def debug_error_string(self):
        return _common.decode(self._state.debug_error_string)

    def _repr(self):
        return _rpc_state_string(self.__class__.__name__, self._state)

    def __repr__(self):
        return self._repr()

    def __str__(self):
        return self._repr()

    def cancel(self):
        """See grpc.Future.cancel."""
        # The RPC already terminated; nothing left to cancel.
        return False

    def cancelled(self):
        """See grpc.Future.cancelled."""
        return False

    def running(self):
        """See grpc.Future.running."""
        return False

    def done(self):
        """See grpc.Future.done."""
        return True

    def result(self, timeout=None):  # pylint: disable=unused-argument
        """See grpc.Future.result."""
        # This object is both the Future and the exception it resolves to.
        raise self

    def exception(self, timeout=None):  # pylint: disable=unused-argument
        """See grpc.Future.exception."""
        return self

    def traceback(self, timeout=None):  # pylint: disable=unused-argument
        """See grpc.Future.traceback."""
        # Raise and immediately catch self to materialize a traceback.
        try:
            raise self
        except grpc.RpcError:
            return sys.exc_info()[2]

    def add_done_callback(self, fn, timeout=None):  # pylint: disable=unused-argument
        """See grpc.Future.add_done_callback."""
        # Already done: invoke synchronously.
        fn(self)
class _Rendezvous(grpc.RpcError, grpc.RpcContext):
    """An RPC iterator.

    Attributes:
      _state: An instance of _RPCState.
      _call: An instance of SegregatedCall or IntegratedCall.
        In either case, the _call object is expected to have operate, cancel,
        and next_event methods.
      _response_deserializer: A callable taking bytes and return a Python
        object.
      _deadline: A float representing the deadline of the RPC in seconds. Or
        possibly None, to represent an RPC with no deadline at all.
    """

    def __init__(self, state, call, response_deserializer, deadline):
        super(_Rendezvous, self).__init__()
        self._state = state
        self._call = call
        self._response_deserializer = response_deserializer
        self._deadline = deadline

    def is_active(self):
        """See grpc.RpcContext.is_active"""
        with self._state.condition:
            # Active means no terminal status has been recorded yet.
            return self._state.code is None

    def time_remaining(self):
        """See grpc.RpcContext.time_remaining"""
        with self._state.condition:
            if self._deadline is None:
                return None
            else:
                # Clamp at zero: an expired deadline never goes negative.
                return max(self._deadline - time.time(), 0)

    def cancel(self):
        """See grpc.RpcContext.cancel"""
        with self._state.condition:
            if self._state.code is None:
                code = grpc.StatusCode.CANCELLED
                details = 'Locally cancelled by application!'
                self._call.cancel(
                    _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code], details)
                self._state.cancelled = True
                _abort(self._state, code, details)
                # Wake anyone blocked on the RPC's completion.
                self._state.condition.notify_all()
                return True
            else:
                # Already terminated; cancellation has no effect.
                return False

    def add_callback(self, callback):
        """See grpc.RpcContext.add_callback"""
        with self._state.condition:
            # callbacks is None once the RPC has terminated.
            if self._state.callbacks is None:
                return False
            else:
                self._state.callbacks.append(callback)
                return True

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol; delegates to the same _next.
        return self._next()

    def __next__(self):
        return self._next()

    def _next(self):
        # Implemented by subclasses (single- vs multi-threaded variants).
        raise NotImplementedError()

    def debug_error_string(self):
        raise NotImplementedError()

    def _repr(self):
        return _rpc_state_string(self.__class__.__name__, self._state)

    def __repr__(self):
        return self._repr()

    def __str__(self):
        return self._repr()

    def __del__(self):
        # Actively cancel an abandoned in-flight RPC so its resources are
        # released rather than leaking until the channel closes.
        with self._state.condition:
            if self._state.code is None:
                self._state.code = grpc.StatusCode.CANCELLED
                self._state.details = 'Cancelled upon garbage collection!'
                self._state.cancelled = True
                self._call.cancel(
                    _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[self._state.code],
                    self._state.details)
                self._state.condition.notify_all()
class _SingleThreadedRendezvous(_Rendezvous, grpc.Call):  # pylint: disable=too-many-ancestors
    """An RPC iterator operating entirely on a single thread.

    The __next__ method of _SingleThreadedRendezvous does not depend on the
    existence of any other thread, including the "channel spin thread".
    However, this means that its interface is entirely synchronous. So this
    class cannot fulfill the grpc.Future interface.
    """

    def initial_metadata(self):
        """See grpc.Call.initial_metadata"""
        with self._state.condition:
            # NOTE(gnossen): Based on our initial call batch, we are guaranteed
            # to receive initial metadata before any messages.
            while self._state.initial_metadata is None:
                self._consume_next_event()
            return self._state.initial_metadata

    def trailing_metadata(self):
        """See grpc.Call.trailing_metadata"""
        with self._state.condition:
            # Unlike initial metadata, we cannot drive the call forward
            # here: trailing metadata only arrives at RPC completion.
            if self._state.trailing_metadata is None:
                raise grpc.experimental.UsageError(
                    "Cannot get trailing metadata until RPC is completed.")
            return self._state.trailing_metadata

    def code(self):
        """See grpc.Call.code"""
        with self._state.condition:
            if self._state.code is None:
                raise grpc.experimental.UsageError(
                    "Cannot get code until RPC is completed.")
            return self._state.code

    def details(self):
        """See grpc.Call.details"""
        with self._state.condition:
            if self._state.details is None:
                raise grpc.experimental.UsageError(
                    "Cannot get details until RPC is completed.")
            return _common.decode(self._state.details)

    def _consume_next_event(self):
        # Block for the next completion-queue event and fold it into state.
        # Caller is expected to hold self._state.condition.
        event = self._call.next_event()
        with self._state.condition:
            callbacks = _handle_event(event, self._state,
                                      self._response_deserializer)
            for callback in callbacks:
                # NOTE(gnossen): We intentionally allow exceptions to bubble up
                # to the user when running on a single thread.
                callback()
        return event

    def _next_response(self):
        # Drive the call until a message arrives or the RPC terminates.
        while True:
            self._consume_next_event()
            with self._state.condition:
                if self._state.response is not None:
                    response = self._state.response
                    self._state.response = None
                    return response
                elif cygrpc.OperationType.receive_message not in self._state.due:
                    # No message pending and none expected: the RPC ended.
                    if self._state.code is grpc.StatusCode.OK:
                        raise StopIteration()
                    elif self._state.code is not None:
                        raise self

    def _next(self):
        with self._state.condition:
            if self._state.code is None:
                # Queue a receive for the next message, then block for it.
                operating = self._call.operate(
                    (cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),), None)
                if operating:
                    self._state.due.add(cygrpc.OperationType.receive_message)
            elif self._state.code is grpc.StatusCode.OK:
                raise StopIteration()
            else:
                raise self
        return self._next_response()

    def debug_error_string(self):
        with self._state.condition:
            if self._state.debug_error_string is None:
                raise grpc.experimental.UsageError(
                    "Cannot get debug error string until RPC is completed.")
            return _common.decode(self._state.debug_error_string)
class _MultiThreadedRendezvous(_Rendezvous, grpc.Call, grpc.Future):  # pylint: disable=too-many-ancestors
    """An RPC iterator that depends on a channel spin thread.

    This iterator relies upon a per-channel thread running in the background,
    dequeueing events from the completion queue, and notifying threads waiting
    on the threading.Condition object in the _RPCState object.

    This extra thread allows _MultiThreadedRendezvous to fulfill the
    grpc.Future interface and to mediate a bidirection streaming RPC.
    """

    def initial_metadata(self):
        """See grpc.Call.initial_metadata"""
        with self._state.condition:

            def _done():
                return self._state.initial_metadata is not None

            # Block (fork-aware) until the spin thread records the metadata.
            _common.wait(self._state.condition.wait, _done)
            return self._state.initial_metadata

    def trailing_metadata(self):
        """See grpc.Call.trailing_metadata"""
        with self._state.condition:

            def _done():
                return self._state.trailing_metadata is not None

            _common.wait(self._state.condition.wait, _done)
            return self._state.trailing_metadata

    def code(self):
        """See grpc.Call.code"""
        with self._state.condition:

            def _done():
                return self._state.code is not None

            _common.wait(self._state.condition.wait, _done)
            return self._state.code

    def details(self):
        """See grpc.Call.details"""
        with self._state.condition:

            def _done():
                return self._state.details is not None

            _common.wait(self._state.condition.wait, _done)
            return _common.decode(self._state.details)

    def debug_error_string(self):
        """Return the decoded debug error string, blocking until available."""
        with self._state.condition:

            def _done():
                return self._state.debug_error_string is not None

            _common.wait(self._state.condition.wait, _done)
            return _common.decode(self._state.debug_error_string)

    def cancelled(self):
        # See grpc.Future.cancelled.
        with self._state.condition:
            return self._state.cancelled

    def running(self):
        # Running means no terminal status code has been recorded yet.
        with self._state.condition:
            return self._state.code is None

    def done(self):
        # Done means a terminal status code has been recorded.
        with self._state.condition:
            return self._state.code is not None

    def _is_complete(self):
        # Predicate used with _common.wait; caller must hold the condition.
        return self._state.code is not None

    def result(self, timeout=None):
        """Returns the result of the computation or raises its exception.

        See grpc.Future.result for the full API contract.
        """
        with self._state.condition:
            timed_out = _common.wait(self._state.condition.wait,
                                     self._is_complete,
                                     timeout=timeout)
            if timed_out:
                raise grpc.FutureTimeoutError()
            else:
                if self._state.code is grpc.StatusCode.OK:
                    return self._state.response
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    # Non-OK, non-cancelled: the rendezvous is the RpcError.
                    raise self

    def exception(self, timeout=None):
        """Return the exception raised by the computation.

        See grpc.Future.exception for the full API contract.
        """
        with self._state.condition:
            timed_out = _common.wait(self._state.condition.wait,
                                     self._is_complete,
                                     timeout=timeout)
            if timed_out:
                raise grpc.FutureTimeoutError()
            else:
                if self._state.code is grpc.StatusCode.OK:
                    # Success: there is no exception to return.
                    return None
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    return self

    def traceback(self, timeout=None):
        """Access the traceback of the exception raised by the computation.

        See grpc.future.traceback for the full API contract.
        """
        with self._state.condition:
            timed_out = _common.wait(self._state.condition.wait,
                                     self._is_complete,
                                     timeout=timeout)
            if timed_out:
                raise grpc.FutureTimeoutError()
            else:
                if self._state.code is grpc.StatusCode.OK:
                    return None
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    # Raise-and-catch to materialize a traceback object for
                    # the stored failure.
                    try:
                        raise self
                    except grpc.RpcError:
                        return sys.exc_info()[2]

    def add_done_callback(self, fn):
        # See grpc.Future.add_done_callback: run later if still in flight,
        # immediately (outside the lock) if already terminated.
        with self._state.condition:
            if self._state.code is None:
                self._state.callbacks.append(functools.partial(fn, self))
                return

        fn(self)

    def _next(self):
        """Fetch the next response message for the iterator protocol.

        Enqueues a receive-message operation (handled by the channel spin
        thread) and then waits on the condition until either a response is
        buffered or the RPC reaches a terminal state.
        """
        with self._state.condition:
            if self._state.code is None:
                event_handler = _event_handler(self._state,
                                               self._response_deserializer)
                operating = self._call.operate(
                    (cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
                    event_handler)
                if operating:
                    self._state.due.add(cygrpc.OperationType.receive_message)
            elif self._state.code is grpc.StatusCode.OK:
                raise StopIteration()
            else:
                raise self

            def _response_ready():
                # Ready when a response is buffered, or when no receive is
                # outstanding and a terminal status has been recorded.
                return (
                    self._state.response is not None or
                    (cygrpc.OperationType.receive_message not in self._state.due
                     and self._state.code is not None))

            _common.wait(self._state.condition.wait, _response_ready)
            if self._state.response is not None:
                response = self._state.response
                self._state.response = None
                return response
            elif cygrpc.OperationType.receive_message not in self._state.due:
                if self._state.code is grpc.StatusCode.OK:
                    raise StopIteration()
                elif self._state.code is not None:
                    raise self
def _start_unary_request(request, timeout, request_serializer):
    """Serialize a unary request and compute its deadline.

    Returns:
      A (deadline, serialized_request, error) triple in which exactly one of
      serialized_request / error is None.
    """
    deadline = _deadline(timeout)
    serialized = _common.serialize(request, request_serializer)
    if serialized is not None:
        return deadline, serialized, None
    # Serialization failed: report an INTERNAL error for the caller to raise.
    failure_state = _RPCState((), (), (), grpc.StatusCode.INTERNAL,
                              'Exception serializing request!')
    return deadline, None, _InactiveRpcError(failure_state)
def _end_unary_response_blocking(state, call, with_call, deadline):
    """Produce the final value of a blocking unary-response invocation.

    Returns the response, or a (response, Call) pair when ``with_call`` is
    set; raises _InactiveRpcError on any non-OK terminal status.
    """
    if state.code is not grpc.StatusCode.OK:
        raise _InactiveRpcError(state)
    if not with_call:
        return state.response
    # Wrap the finished call so the user can still query metadata/status.
    return state.response, _MultiThreadedRendezvous(state, call, None, deadline)
def _stream_unary_invocation_operationses(metadata, initial_metadata_flags):
    """Build the operation batches for a stream-unary invocation.

    Returns:
      Two batches: the main batch (send initial metadata, receive the single
      response message, receive status) and a separate batch that receives
      the server's initial metadata.
    """
    return (
        (
            cygrpc.SendInitialMetadataOperation(metadata,
                                                initial_metadata_flags),
            cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
            cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
        ),
        (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
    )
def _stream_unary_invocation_operationses_and_tags(metadata,
                                                   initial_metadata_flags):
    """Pair each stream-unary operation batch with a ``None`` tag."""
    tagged_batches = []
    for operations in _stream_unary_invocation_operationses(
            metadata, initial_metadata_flags):
        tagged_batches.append((operations, None))
    return tuple(tagged_batches)
def _determine_deadline(user_deadline):
    """Combine the ambient (propagated) deadline with the user's deadline.

    Returns whichever of the two deadlines is present; when both are set,
    the earlier (smaller) one wins. None means no deadline at all.
    """
    parent_deadline = cygrpc.get_deadline_from_context()
    if parent_deadline is None:
        return user_deadline
    if user_deadline is None:
        return parent_deadline
    return min(parent_deadline, user_deadline)
class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
    """Invoker for unary-request unary-response RPCs (blocking and future)."""

    # pylint: disable=too-many-arguments
    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        # Census context captured at stub-creation time for tracing.
        self._context = cygrpc.build_census_context()

    def _prepare(self, request, timeout, metadata, wait_for_ready, compression):
        """Serialize the request and assemble the full operation batch.

        Returns:
          A (state, operations, deadline, rendezvous) tuple; on serialization
          failure state/operations/deadline are None and rendezvous carries
          the error for the caller to raise.
        """
        deadline, serialized_request, rendezvous = _start_unary_request(
            request, timeout, self._request_serializer)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        augmented_metadata = _compression.augment_metadata(
            metadata, compression)
        if serialized_request is None:
            return None, None, None, rendezvous
        else:
            state = _RPCState(_UNARY_UNARY_INITIAL_DUE, None, None, None, None)
            # All six operations of a unary-unary RPC in a single batch.
            operations = (
                cygrpc.SendInitialMetadataOperation(augmented_metadata,
                                                    initial_metadata_flags),
                cygrpc.SendMessageOperation(serialized_request, _EMPTY_FLAGS),
                cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
                cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),
                cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
                cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
            )
            return state, operations, deadline, None

    def _blocking(self, request, timeout, metadata, credentials, wait_for_ready,
                  compression):
        """Run the RPC on a segregated call, blocking until its one event."""
        state, operations, deadline, rendezvous = self._prepare(
            request, timeout, metadata, wait_for_ready, compression)
        if state is None:
            raise rendezvous  # pylint: disable-msg=raising-bad-type
        else:
            call = self._channel.segregated_call(
                cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
                self._method, None, _determine_deadline(deadline), metadata,
                None if credentials is None else credentials._credentials, ((
                    operations,
                    None,
                ),), self._context)
            event = call.next_event()
            _handle_event(event, state, self._response_deserializer)
            return state, call

    def __call__(self,
                 request,
                 timeout=None,
                 metadata=None,
                 credentials=None,
                 wait_for_ready=None,
                 compression=None):
        # Blocking invocation returning only the response message.
        state, call, = self._blocking(request, timeout, metadata, credentials,
                                      wait_for_ready, compression)
        return _end_unary_response_blocking(state, call, False, None)

    def with_call(self,
                  request,
                  timeout=None,
                  metadata=None,
                  credentials=None,
                  wait_for_ready=None,
                  compression=None):
        # Blocking invocation returning (response, grpc.Call).
        state, call, = self._blocking(request, timeout, metadata, credentials,
                                      wait_for_ready, compression)
        return _end_unary_response_blocking(state, call, True, None)

    def future(self,
               request,
               timeout=None,
               metadata=None,
               credentials=None,
               wait_for_ready=None,
               compression=None):
        # Asynchronous invocation: events are handled by the channel spin
        # thread and surfaced through the returned rendezvous/future.
        state, operations, deadline, rendezvous = self._prepare(
            request, timeout, metadata, wait_for_ready, compression)
        if state is None:
            raise rendezvous  # pylint: disable-msg=raising-bad-type
        else:
            event_handler = _event_handler(state, self._response_deserializer)
            call = self._managed_call(
                cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
                self._method, None, deadline, metadata,
                None if credentials is None else credentials._credentials,
                (operations,), event_handler, self._context)
            return _MultiThreadedRendezvous(state, call,
                                            self._response_deserializer,
                                            deadline)
class _SingleThreadedUnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
    """Unary-stream invoker that drives the RPC on the calling thread.

    Unlike _UnaryStreamMultiCallable, no channel spin thread is used: the
    returned _SingleThreadedRendezvous pulls completion-queue events itself
    (see the single-threaded-unary-stream note in Channel.unary_stream).
    """

    # pylint: disable=too-many-arguments
    def __init__(self, channel, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        # Census context captured at stub-creation time for tracing.
        self._context = cygrpc.build_census_context()

    def __call__(  # pylint: disable=too-many-locals
            self,
            request,
            timeout=None,
            metadata=None,
            credentials=None,
            wait_for_ready=None,
            compression=None):
        """Start the RPC and return a _SingleThreadedRendezvous iterator.

        Raises:
          _InactiveRpcError: if the request cannot be serialized.
        """
        deadline = _deadline(timeout)
        serialized_request = _common.serialize(request,
                                               self._request_serializer)
        if serialized_request is None:
            state = _RPCState((), (), (), grpc.StatusCode.INTERNAL,
                              'Exception serializing request!')
            raise _InactiveRpcError(state)

        state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None)
        call_credentials = None if credentials is None else credentials._credentials
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        augmented_metadata = _compression.augment_metadata(
            metadata, compression)
        # Three batches: all sends, the final status, and initial metadata.
        operations = (
            (cygrpc.SendInitialMetadataOperation(augmented_metadata,
                                                 initial_metadata_flags),
             cygrpc.SendMessageOperation(serialized_request, _EMPTY_FLAGS),
             cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS)),
            (cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),),
            (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
        )
        operations_and_tags = tuple((ops, None) for ops in operations)
        call = self._channel.segregated_call(
            cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
            None, _determine_deadline(deadline), metadata, call_credentials,
            operations_and_tags, self._context)
        return _SingleThreadedRendezvous(state, call,
                                         self._response_deserializer, deadline)
class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
    """Unary-stream invoker backed by the channel spin thread."""

    # pylint: disable=too-many-arguments
    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        # Census context captured at stub-creation time for tracing.
        self._context = cygrpc.build_census_context()

    def __call__(  # pylint: disable=too-many-locals
            self,
            request,
            timeout=None,
            metadata=None,
            credentials=None,
            wait_for_ready=None,
            compression=None):
        """Start the RPC and return a _MultiThreadedRendezvous iterator.

        Raises:
          The serialization-failure rendezvous if the request cannot be
          serialized.
        """
        deadline, serialized_request, rendezvous = _start_unary_request(
            request, timeout, self._request_serializer)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        if serialized_request is None:
            raise rendezvous  # pylint: disable-msg=raising-bad-type
        else:
            augmented_metadata = _compression.augment_metadata(
                metadata, compression)
            state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None)
            # Two batches: all sends plus final status, and initial metadata.
            operationses = (
                (
                    cygrpc.SendInitialMetadataOperation(augmented_metadata,
                                                        initial_metadata_flags),
                    cygrpc.SendMessageOperation(serialized_request,
                                                _EMPTY_FLAGS),
                    cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
                    cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
                ),
                (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
            )
            call = self._managed_call(
                cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
                self._method, None, _determine_deadline(deadline), metadata,
                None if credentials is None else credentials._credentials,
                operationses, _event_handler(state,
                                             self._response_deserializer),
                self._context)
            return _MultiThreadedRendezvous(state, call,
                                            self._response_deserializer,
                                            deadline)
class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
    """Invoker for stream-request unary-response RPCs (blocking and future)."""

    # pylint: disable=too-many-arguments
    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        # Census context captured at stub-creation time for tracing.
        self._context = cygrpc.build_census_context()

    def _blocking(self, request_iterator, timeout, metadata, credentials,
                  wait_for_ready, compression):
        """Drive the RPC to completion on the calling thread.

        Returns:
          A (state, call) pair once no operations remain outstanding.
        """
        deadline = _deadline(timeout)
        state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        augmented_metadata = _compression.augment_metadata(
            metadata, compression)
        call = self._channel.segregated_call(
            cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
            None, _determine_deadline(deadline), augmented_metadata,
            None if credentials is None else credentials._credentials,
            _stream_unary_invocation_operationses_and_tags(
                augmented_metadata, initial_metadata_flags), self._context)
        # Requests are pushed from a separate consumption thread; this thread
        # pumps events until every due operation has completed.
        _consume_request_iterator(request_iterator, state, call,
                                  self._request_serializer, None)
        while True:
            event = call.next_event()
            with state.condition:
                _handle_event(event, state, self._response_deserializer)
                state.condition.notify_all()
                if not state.due:
                    break
        return state, call

    def __call__(self,
                 request_iterator,
                 timeout=None,
                 metadata=None,
                 credentials=None,
                 wait_for_ready=None,
                 compression=None):
        # Blocking invocation returning only the response message.
        state, call, = self._blocking(request_iterator, timeout, metadata,
                                      credentials, wait_for_ready, compression)
        return _end_unary_response_blocking(state, call, False, None)

    def with_call(self,
                  request_iterator,
                  timeout=None,
                  metadata=None,
                  credentials=None,
                  wait_for_ready=None,
                  compression=None):
        # Blocking invocation returning (response, grpc.Call).
        state, call, = self._blocking(request_iterator, timeout, metadata,
                                      credentials, wait_for_ready, compression)
        return _end_unary_response_blocking(state, call, True, None)

    def future(self,
               request_iterator,
               timeout=None,
               metadata=None,
               credentials=None,
               wait_for_ready=None,
               compression=None):
        # Asynchronous invocation handled by the channel spin thread.
        deadline = _deadline(timeout)
        state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
        event_handler = _event_handler(state, self._response_deserializer)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        augmented_metadata = _compression.augment_metadata(
            metadata, compression)
        # NOTE(review): unlike _blocking, this path passes the raw `deadline`
        # rather than _determine_deadline(deadline) to the call — confirm
        # whether parent-context deadlines are intentionally ignored here.
        call = self._managed_call(
            cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
            None, deadline, augmented_metadata,
            None if credentials is None else credentials._credentials,
            # Fix: build the operation batches from augmented_metadata (as
            # _blocking does); previously the un-augmented `metadata` was
            # used here, dropping compression metadata in the future path.
            _stream_unary_invocation_operationses(augmented_metadata,
                                                  initial_metadata_flags),
            event_handler, self._context)
        _consume_request_iterator(request_iterator, state, call,
                                  self._request_serializer, event_handler)
        return _MultiThreadedRendezvous(state, call,
                                        self._response_deserializer, deadline)
class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
    """Invoker for bidirectional streaming RPCs."""

    # pylint: disable=too-many-arguments
    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        # Census context captured at stub-creation time for tracing.
        self._context = cygrpc.build_census_context()

    def __call__(self,
                 request_iterator,
                 timeout=None,
                 metadata=None,
                 credentials=None,
                 wait_for_ready=None,
                 compression=None):
        """Start the RPC and return a _MultiThreadedRendezvous iterator."""
        deadline = _deadline(timeout)
        state = _RPCState(_STREAM_STREAM_INITIAL_DUE, None, None, None, None)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        augmented_metadata = _compression.augment_metadata(
            metadata, compression)
        # Two batches: initial metadata + status, and server initial metadata.
        # Message sends/receives are driven incrementally afterwards.
        operationses = (
            (
                cygrpc.SendInitialMetadataOperation(augmented_metadata,
                                                    initial_metadata_flags),
                cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
            ),
            (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
        )
        event_handler = _event_handler(state, self._response_deserializer)
        call = self._managed_call(
            cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
            None, _determine_deadline(deadline), augmented_metadata,
            None if credentials is None else credentials._credentials,
            operationses, event_handler, self._context)
        _consume_request_iterator(request_iterator, state, call,
                                  self._request_serializer, event_handler)
        return _MultiThreadedRendezvous(state, call,
                                        self._response_deserializer, deadline)
class _InitialMetadataFlags(int):
    """Stores immutable initial metadata flags"""

    def __new__(cls, value=_EMPTY_FLAGS):
        # Mask out any bits that are not valid initial-metadata flags.
        value &= cygrpc.InitialMetadataFlags.used_mask
        return super(_InitialMetadataFlags, cls).__new__(cls, value)

    def with_wait_for_ready(self, wait_for_ready):
        """Return a copy with wait-for-ready explicitly set or cleared.

        A None value leaves the flags unchanged (server-default behavior);
        True/False set the flag and mark it as explicitly chosen.
        """
        if wait_for_ready is not None:
            if wait_for_ready:
                return self.__class__(self | cygrpc.InitialMetadataFlags.wait_for_ready | \
                    cygrpc.InitialMetadataFlags.wait_for_ready_explicitly_set)
            else:
                # Fix: this branch previously read `elif not wait_for_ready:`,
                # which is always true after the `if wait_for_ready:` test —
                # a plain `else` states the intent without the dead condition.
                return self.__class__(self & ~cygrpc.InitialMetadataFlags.wait_for_ready | \
                    cygrpc.InitialMetadataFlags.wait_for_ready_explicitly_set)
        return self
class _ChannelCallState(object):
def __init__(self, channel):
self.lock = threading.Lock()
self.channel = channel
self.managed_calls = 0
self.threading = False
def reset_postfork_child(self):
self.managed_calls = 0
def _run_channel_spin_thread(state):
    """Start the daemon thread that drains the channel's call events.

    The thread dispatches each event to its tag (the per-call event handler)
    and exits once the last managed call completes (managed_calls reaches
    zero under state.lock).
    """

    def channel_spin():
        while True:
            # Cooperate with fork support before blocking on the queue.
            cygrpc.block_if_fork_in_progress(state)
            event = state.channel.next_call_event()
            if event.completion_type == cygrpc.CompletionType.queue_timeout:
                continue
            call_completed = event.tag(event)
            if call_completed:
                with state.lock:
                    state.managed_calls -= 1
                    if state.managed_calls == 0:
                        return

    channel_spin_thread = cygrpc.ForkManagedThread(target=channel_spin)
    channel_spin_thread.setDaemon(True)
    channel_spin_thread.start()
def _channel_managed_call_management(state):
    """Return a factory of managed calls, sharing one spin thread per channel."""

    # pylint: disable=too-many-arguments
    def create(flags, method, host, deadline, metadata, credentials,
               operationses, event_handler, context):
        """Creates a cygrpc.IntegratedCall.

        Args:
          flags: An integer bitfield of call flags.
          method: The RPC method.
          host: A host string for the created call.
          deadline: A float to be the deadline of the created call or None if
            the call is to have an infinite deadline.
          metadata: The metadata for the call or None.
          credentials: A cygrpc.CallCredentials or None.
          operationses: An iterable of iterables of cygrpc.Operations to be
            started on the call.
          event_handler: A behavior to call to handle the events resultant from
            the operations on the call.
          context: Context object for distributed tracing.

        Returns:
          A cygrpc.IntegratedCall with which to conduct an RPC.
        """
        # Every batch is tagged with the same event handler so the spin
        # thread can dispatch its completions.
        operationses_and_tags = tuple((
            operations,
            event_handler,
        ) for operations in operationses)
        with state.lock:
            call = state.channel.integrated_call(flags, method, host, deadline,
                                                 metadata, credentials,
                                                 operationses_and_tags, context)
            if state.managed_calls == 0:
                # First managed call: start the spin thread that will drain
                # events for this and all subsequent calls.
                state.managed_calls = 1
                _run_channel_spin_thread(state)
            else:
                state.managed_calls += 1
            return call

    return create
class _ChannelConnectivityState(object):
def __init__(self, channel):
self.lock = threading.RLock()
self.channel = channel
self.polling = False
self.connectivity = None
self.try_to_connect = False
self.callbacks_and_connectivities = []
self.delivering = False
def reset_postfork_child(self):
self.polling = False
self.connectivity = None
self.try_to_connect = False
self.callbacks_and_connectivities = []
self.delivering = False
def _deliveries(state):
callbacks_needing_update = []
for callback_and_connectivity in state.callbacks_and_connectivities:
callback, callback_connectivity, = callback_and_connectivity
if callback_connectivity is not state.connectivity:
callbacks_needing_update.append(callback)
callback_and_connectivity[1] = state.connectivity
return callbacks_needing_update
def _deliver(state, initial_connectivity, initial_callbacks):
    """Deliver connectivity updates to subscriber callbacks.

    Runs on a dedicated delivery thread; keeps delivering as long as fresh
    updates accumulate, then clears state.delivering and exits.
    """
    connectivity = initial_connectivity
    callbacks = initial_callbacks
    while True:
        for callback in callbacks:
            # Cooperate with fork support between user callbacks.
            cygrpc.block_if_fork_in_progress(state)
            try:
                callback(connectivity)
            except Exception:  # pylint: disable=broad-except
                # A raising user callback must not kill the delivery thread.
                _LOGGER.exception(
                    _CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE)
        with state.lock:
            callbacks = _deliveries(state)
            if callbacks:
                # More stale subscribers appeared while delivering; loop.
                connectivity = state.connectivity
            else:
                state.delivering = False
                return
def _spawn_delivery(state, callbacks):
    """Start a delivery thread for ``callbacks`` and mark state delivering.

    Caller must hold state.lock so that the delivering flag is set before
    any other thread can observe it.
    """
    delivering_thread = cygrpc.ForkManagedThread(target=_deliver,
                                                 args=(
                                                     state,
                                                     state.connectivity,
                                                     callbacks,
                                                 ))
    delivering_thread.start()
    state.delivering = True
# NOTE(https://github.com/grpc/grpc/issues/3064): We'd rather not poll.
def _poll_connectivity(state, channel, initial_try_to_connect):
    """Poll channel connectivity and fan results out to subscribers.

    Runs on a dedicated polling thread; exits when there are no subscribers
    left and no connect attempt is requested.
    """
    try_to_connect = initial_try_to_connect
    connectivity = channel.check_connectivity_state(try_to_connect)
    with state.lock:
        # Record the initial connectivity and deliver it to everyone who
        # subscribed before polling started.
        state.connectivity = (
            _common.
            CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[connectivity])
        callbacks = tuple(
            callback for callback, unused_but_known_to_be_none_connectivity in
            state.callbacks_and_connectivities)
        for callback_and_connectivity in state.callbacks_and_connectivities:
            callback_and_connectivity[1] = state.connectivity
        if callbacks:
            _spawn_delivery(state, callbacks)
    while True:
        # Short watch timeout so fork progress and shutdown are noticed.
        event = channel.watch_connectivity_state(connectivity,
                                                 time.time() + 0.2)
        cygrpc.block_if_fork_in_progress(state)
        with state.lock:
            if not state.callbacks_and_connectivities and not state.try_to_connect:
                # Nobody is listening and no connect was requested: stop.
                state.polling = False
                state.connectivity = None
                break
            try_to_connect = state.try_to_connect
            state.try_to_connect = False
        if event.success or try_to_connect:
            connectivity = channel.check_connectivity_state(try_to_connect)
            with state.lock:
                state.connectivity = (
                    _common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
                        connectivity])
                if not state.delivering:
                    callbacks = _deliveries(state)
                    if callbacks:
                        _spawn_delivery(state, callbacks)
def _subscribe(state, callback, try_to_connect):
    """Register a connectivity callback, starting polling if necessary."""
    with state.lock:
        if not state.callbacks_and_connectivities and not state.polling:
            # First subscriber: start the polling thread; the callback will
            # receive its first update from the poller's initial delivery.
            polling_thread = cygrpc.ForkManagedThread(
                target=_poll_connectivity,
                args=(state, state.channel, bool(try_to_connect)))
            polling_thread.setDaemon(True)
            polling_thread.start()
            state.polling = True
            state.callbacks_and_connectivities.append([callback, None])
        elif not state.delivering and state.connectivity is not None:
            # Connectivity already known and no delivery in flight: deliver
            # the current state to the new subscriber immediately.
            _spawn_delivery(state, (callback,))
            state.try_to_connect |= bool(try_to_connect)
            state.callbacks_and_connectivities.append(
                [callback, state.connectivity])
        else:
            # A delivery is in flight (or connectivity unknown); the pending
            # machinery will pick this subscriber up.
            state.try_to_connect |= bool(try_to_connect)
            state.callbacks_and_connectivities.append([callback, None])
def _unsubscribe(state, callback):
with state.lock:
for index, (subscribed_callback, unused_connectivity) in enumerate(
state.callbacks_and_connectivities):
if callback == subscribed_callback:
state.callbacks_and_connectivities.pop(index)
break
def _augment_options(base_options, compression):
    """Append compression and user-agent channel args to ``base_options``."""
    compression_option = _compression.create_channel_option(compression)
    return tuple(base_options) + compression_option + ((
        cygrpc.ChannelArgKey.primary_user_agent_string,
        _USER_AGENT,
    ),)
def _separate_channel_options(options):
    """Separates core channel options from Python channel options."""
    python_options = []
    core_options = []
    for key_value in options:
        # SingleThreadedUnaryStream is interpreted by this module, not core.
        if key_value[0] == grpc.experimental.ChannelOptions.SingleThreadedUnaryStream:
            bucket = python_options
        else:
            bucket = core_options
        bucket.append(key_value)
    return python_options, core_options
class Channel(grpc.Channel):
    """A cygrpc.Channel-backed implementation of grpc.Channel."""

    def __init__(self, target, options, credentials, compression):
        """Constructor.

        Args:
          target: The target to which to connect.
          options: Configuration options for the channel.
          credentials: A cygrpc.ChannelCredentials or None.
          compression: An optional value indicating the compression method to be
            used over the lifetime of the channel.
        """
        python_options, core_options = _separate_channel_options(options)
        self._single_threaded_unary_stream = _DEFAULT_SINGLE_THREADED_UNARY_STREAM
        self._process_python_options(python_options)
        self._channel = cygrpc.Channel(
            _common.encode(target), _augment_options(core_options, compression),
            credentials)
        self._call_state = _ChannelCallState(self._channel)
        self._connectivity_state = _ChannelConnectivityState(self._channel)
        # Register with fork support so the channel can be torn down safely
        # in forked children.
        cygrpc.fork_register_channel(self)

    def _process_python_options(self, python_options):
        """Sets channel attributes according to python-only channel options."""
        for pair in python_options:
            if pair[0] == grpc.experimental.ChannelOptions.SingleThreadedUnaryStream:
                self._single_threaded_unary_stream = True

    def subscribe(self, callback, try_to_connect=None):
        # See grpc.Channel.subscribe.
        _subscribe(self._connectivity_state, callback, try_to_connect)

    def unsubscribe(self, callback):
        # See grpc.Channel.unsubscribe.
        _unsubscribe(self._connectivity_state, callback)

    def unary_unary(self,
                    method,
                    request_serializer=None,
                    response_deserializer=None):
        # See grpc.Channel.unary_unary.
        return _UnaryUnaryMultiCallable(
            self._channel, _channel_managed_call_management(self._call_state),
            _common.encode(method), request_serializer, response_deserializer)

    def unary_stream(self,
                     method,
                     request_serializer=None,
                     response_deserializer=None):
        # NOTE(rbellevi): Benchmarks have shown that running a unary-stream RPC
        # on a single Python thread results in an appreciable speed-up. However,
        # due to slight differences in capability, the multi-threaded variant
        # remains the default.
        if self._single_threaded_unary_stream:
            return _SingleThreadedUnaryStreamMultiCallable(
                self._channel, _common.encode(method), request_serializer,
                response_deserializer)
        else:
            return _UnaryStreamMultiCallable(
                self._channel,
                _channel_managed_call_management(self._call_state),
                _common.encode(method), request_serializer,
                response_deserializer)

    def stream_unary(self,
                     method,
                     request_serializer=None,
                     response_deserializer=None):
        # See grpc.Channel.stream_unary.
        return _StreamUnaryMultiCallable(
            self._channel, _channel_managed_call_management(self._call_state),
            _common.encode(method), request_serializer, response_deserializer)

    def stream_stream(self,
                      method,
                      request_serializer=None,
                      response_deserializer=None):
        # See grpc.Channel.stream_stream.
        return _StreamStreamMultiCallable(
            self._channel, _channel_managed_call_management(self._call_state),
            _common.encode(method), request_serializer, response_deserializer)

    def _unsubscribe_all(self):
        # Drop every connectivity subscription so the polling thread can exit.
        state = self._connectivity_state
        if state:
            with state.lock:
                del state.callbacks_and_connectivities[:]

    def _close(self):
        self._unsubscribe_all()
        self._channel.close(cygrpc.StatusCode.cancelled, 'Channel closed!')
        cygrpc.fork_unregister_channel(self)

    def _close_on_fork(self):
        self._unsubscribe_all()
        self._channel.close_on_fork(cygrpc.StatusCode.cancelled,
                                    'Channel closed due to fork')

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._close()
        # Do not suppress exceptions raised inside the with-block.
        return False

    def close(self):
        # See grpc.Channel.close.
        self._close()

    def __del__(self):
        # TODO(https://github.com/grpc/grpc/issues/12531): Several releases
        # after 1.12 (1.16 or thereabouts?) add a "self._channel.close" call
        # here (or more likely, call self._close() here). We don't do this today
        # because many valid use cases today allow the channel to be deleted
        # immediately after stubs are created. After a sufficient period of time
        # has passed for all users to be trusted to hang out to their channels
        # for as long as they are in use and to close them after using them,
        # then deletion of this grpc._channel.Channel instance can be made to
        # effect closure of the underlying cygrpc.Channel instance.
        try:
            self._unsubscribe_all()
        except:  # pylint: disable=bare-except
            # Exceptions in __del__ are ignored by Python anyway, but they can
            # keep spamming logs. Just silence them.
            pass
# ===== unrelated module follows: conans.client.build.autotools_environment =====
import copy
import os
import platform
from conans.client import join_arguments
from conans.client.build.compiler_flags import (architecture_flag, build_type_define,
build_type_flags, format_defines,
format_include_paths, format_libraries,
format_library_paths, libcxx_define, libcxx_flag,
pic_flag, rpath_flags, sysroot_flag)
from conans.client.build.cppstd_flags import cppstd_flag, cppstd_from_settings
from conans.client.tools.env import environment_append
from conans.client.tools.oss import OSInfo, args_to_string, cpu_count, cross_building, \
detected_architecture, detected_os, get_gnu_triplet
from conans.client.tools.win import unix_path
from conans.errors import ConanException
from conans.model.build_info import DEFAULT_BIN, DEFAULT_INCLUDE, DEFAULT_LIB, DEFAULT_SHARE
from conans.util.files import get_abs_path
class AutoToolsBuildEnvironment(object):
"""
- CPPFLAGS (C-PreProcesor-Flags NOT related with c++) (-I -D)
- CFLAGS (not CPPFLAGS nor LDFLAGS, used for optimization or debugging)
- CXXFLAGS (the CFLAGS for c++)
- LDFLAGS (-L, others like -m64 -m32) linker
"""
    def __init__(self, conanfile, win_bash=False, include_rpath_flags=False):
        """
        FIXME: include_rpath_flags CONAN 2.0 to default True? Could break many packages in center

        Args:
          conanfile: The conanfile whose settings and deps drive the flags.
          win_bash: Whether commands will run inside a Windows bash subsystem.
          include_rpath_flags: Whether to add rpath linker flags.
        """
        self._conanfile = conanfile
        self._win_bash = win_bash
        self._include_rpath_flags = include_rpath_flags
        self.subsystem = OSInfo().detect_windows_subsystem() if self._win_bash else None
        self._deps_cpp_info = conanfile.deps_cpp_info
        # Cache the relevant settings; get_safe returns None when undeclared.
        self._os = conanfile.settings.get_safe("os")
        self._arch = conanfile.settings.get_safe("arch")
        self._os_target = conanfile.settings.get_safe("os_target")
        self._arch_target = conanfile.settings.get_safe("arch_target")
        self._build_type = conanfile.settings.get_safe("build_type")
        self._compiler = conanfile.settings.get_safe("compiler")
        self._compiler_version = conanfile.settings.get_safe("compiler.version")
        self._compiler_runtime = conanfile.settings.get_safe("compiler.runtime")
        self._libcxx = conanfile.settings.get_safe("compiler.libcxx")
        self._cppstd = cppstd_from_settings(conanfile.settings)

        # Set the generic objects before mapping to env vars to let the user
        # alter some value
        self.libs = copy.copy(self._deps_cpp_info.libs)
        self.include_paths = copy.copy(self._deps_cpp_info.include_paths)
        self.library_paths = copy.copy(self._deps_cpp_info.lib_paths)

        self.defines = self._configure_defines()
        # Will go to CFLAGS and CXXFLAGS ["-m64" "-m32", "-g", "-s"]
        self.flags = self._configure_flags()
        # Only c++ flags [-stdlib, -library], will go to CXXFLAGS
        self.cxx_flags = self._configure_cxx_flags()
        # cpp standard
        self.cppstd_flag = cppstd_flag(self._compiler, self._compiler_version, self._cppstd)
        # Not -L flags, ["-m64" "-m32"]
        self.link_flags = self._configure_link_flags()  # TEST!
        # Precalculate -fPIC
        self.fpic = self._configure_fpic()
        # Precalculate build, host, target triplets
        self.build, self.host, self.target = self._get_host_build_target_flags()
def _configure_fpic(self):
if not str(self._os).startswith("Windows"):
fpic = self._conanfile.options.get_safe("fPIC")
if fpic is not None:
shared = self._conanfile.options.get_safe("shared")
return True if (fpic or shared) else None
    def _get_host_build_target_flags(self):
        """Based on google search for build/host triplets, it could need a lot
        and complex verification

        Returns:
          A (build, host, target) triple of GNU triplets; an element is False
          when the corresponding configure flag should be skipped entirely
          (e.g. when not cross-building) and None when the triplet could not
          be computed (a warning is emitted).
        """
        arch_detected = detected_architecture() or platform.machine()
        os_detected = detected_os() or platform.system()

        if self._os_target and self._arch_target:
            try:
                target = get_nu_triplet = get_gnu_triplet(self._os_target, self._arch_target, self._compiler)
            except ConanException as exc:
                self._conanfile.output.warn(str(exc))
                target = None
        else:
            target = None

        if os_detected is None or arch_detected is None or self._arch is None or self._os is None:
            return False, False, target
        if not cross_building(self._conanfile.settings, os_detected, arch_detected):
            return False, False, target

        try:
            build = get_gnu_triplet(os_detected, arch_detected, self._compiler)
        except ConanException as exc:
            self._conanfile.output.warn(str(exc))
            build = None
        try:
            host = get_gnu_triplet(self._os, self._arch, self._compiler)
        except ConanException as exc:
            self._conanfile.output.warn(str(exc))
            host = None
        return build, host, target
def configure(self, configure_dir=None, args=None, build=None, host=None, target=None,
              pkg_config_paths=None, vars=None, use_default_install_dirs=True):
    """Run the ``configure`` script with computed triplets, prefix and
    install-dir arguments, inside the generated build environment.

    :param pkg_config_paths: Optional paths to locate the *.pc files
    :param configure_dir: Absolute or relative path to the configure script
    :param args: Optional arguments to pass to configure.
    :param build: In which system the program will be built. "False" skips the --build flag
    :param host: In which system the generated program will run. "False" skips the --host flag
    :param target: This option is only used to build a cross-compiling toolchain.
                   "False" skips the --target flag
                   When the tool chain generates executable program, in which target system
                   the program will run.

    http://jingfenghanmax.blogspot.com.es/2010/09/configure-with-host-target-and-build.html
    https://gcc.gnu.org/onlinedocs/gccint/Configure-Terms.html
    :param use_default_install_dirs: Use or not the defaulted installation dirs
    """
    if not self._conanfile.should_configure:
        return
    if configure_dir:
        configure_dir = configure_dir.rstrip("/")
    else:
        configure_dir = "."

    triplet_args = []

    if build is not False:  # Skipped by user
        if build or self.build:  # User specified value or automatic
            triplet_args.append("--build=%s" % (build or self.build))

    if host is not False:  # Skipped by user
        if host or self.host:  # User specified value or automatic
            triplet_args.append("--host=%s" % (host or self.host))

    if target is not False:  # Skipped by user
        if target or self.target:  # User specified value or automatic
            triplet_args.append("--target=%s" % (target or self.target))

    if pkg_config_paths:
        pkg_env = {"PKG_CONFIG_PATH":
                   [os.pathsep.join(get_abs_path(f, self._conanfile.install_folder)
                                    for f in pkg_config_paths)]}
    else:
        # If we are using pkg_config generator automate the pcs location, otherwise it could
        # read wrong files
        pkg_env = {"PKG_CONFIG_PATH": [self._conanfile.install_folder]} \
            if "pkg_config" in self._conanfile.generators else {}

    configure_dir = self._adjust_path(configure_dir)

    if self._conanfile.package_folder is not None:
        # Default --prefix to the package folder unless the caller set one.
        if not args:
            args = ["--prefix=%s" % self._conanfile.package_folder.replace("\\", "/")]
        elif not self._is_flag_in_args("prefix", args):
            args.append("--prefix=%s" % self._conanfile.package_folder.replace("\\", "/"))

        all_flags = ["bindir", "sbindir", "libexecdir", "libdir", "includedir", "oldincludedir",
                     "datarootdir"]
        # Only pass install-dir flags this configure script advertises.
        help_output = self._configure_help_output(configure_dir)
        available_flags = [flag for flag in all_flags if "--%s" % flag in help_output]

        if use_default_install_dirs:
            # Relocate the standard install dirs under ${prefix}.
            for varname in ["bindir", "sbindir", "libexecdir"]:
                if self._valid_configure_flag(varname, args, available_flags):
                    args.append("--%s=${prefix}/%s" % (varname, DEFAULT_BIN))
            if self._valid_configure_flag("libdir", args, available_flags):
                args.append("--libdir=${prefix}/%s" % DEFAULT_LIB)
            for varname in ["includedir", "oldincludedir"]:
                if self._valid_configure_flag(varname, args, available_flags):
                    args.append("--%s=${prefix}/%s" % (varname, DEFAULT_INCLUDE))
            if self._valid_configure_flag("datarootdir", args, available_flags):
                args.append("--datarootdir=${prefix}/%s" % DEFAULT_SHARE)

    with environment_append(pkg_env):
        with environment_append(vars or self.vars):
            command = '%s/configure %s %s' % (configure_dir, args_to_string(args),
                                              " ".join(triplet_args))
            self._conanfile.output.info("Calling:\n > %s" % command)
            self._conanfile.run(command, win_bash=self._win_bash, subsystem=self.subsystem)
def _configure_help_output(self, configure_path):
    """Capture the text printed by ``<configure_path>/configure --help``.

    Returns the captured output, or an empty string when running the
    script fails (a warning is emitted in that case).
    """
    from six import StringIO  # Python 2 and 3 compatible
    captured = StringIO()
    command = "%s/configure --help" % configure_path
    try:
        self._conanfile.run(command, output=captured)
    except ConanException as exc:
        self._conanfile.output.warn("Error running `configure --help`: %s" % exc)
        return ""
    return captured.getvalue()
def _adjust_path(self, path):
if self._win_bash:
path = unix_path(path, path_flavor=self.subsystem)
return '"%s"' % path if " " in path else path
@staticmethod
def _valid_configure_flag(varname, args, available_flags):
    """True when ``--<varname>`` is advertised by configure --help and
    the caller did not already pass it in *args*."""
    if AutoToolsBuildEnvironment._is_flag_in_args(varname, args):
        return False
    return varname in available_flags
@staticmethod
def _is_flag_in_args(varname, args):
flag = "--%s=" % varname
return any([flag in arg for arg in args])
def make(self, args="", make_program=None, target=None, vars=None):
    """Invoke make (or CONAN_MAKE_PROGRAM) inside the computed environment.

    :param args: Extra command-line arguments (string or list).
    :param make_program: Alternative make executable.
    :param target: Optional make target (e.g. "install").
    :param vars: Environment variables to apply instead of self.vars.
    """
    if not self._conanfile.should_build:
        return
    # Environment variable takes precedence over the explicit argument.
    make_program = os.getenv("CONAN_MAKE_PROGRAM") or make_program or "make"
    with environment_append(vars or self.vars):
        str_args = args_to_string(args)
        # Parallelize unless the caller already passed a -j option.
        cpu_count_option = (("-j%s" % cpu_count(output=self._conanfile.output))
                            if "-j" not in str_args else None)
        self._conanfile.run("%s" % join_arguments([make_program, target, str_args,
                                                   cpu_count_option]),
                            win_bash=self._win_bash, subsystem=self.subsystem)
def install(self, args="", make_program=None, vars=None):
    """Run ``make install``; honors conanfile.should_install."""
    if self._conanfile.should_install:
        self.make(args=args, make_program=make_program, target="install",
                  vars=vars)
def _configure_link_flags(self):
    """Linker flags (not the -L library paths, handled separately):
    dependency shared/exe link flags, the architecture flag, sysroot
    and optionally rpath entries."""
    ret = copy.copy(self._deps_cpp_info.sharedlinkflags)
    ret.extend(self._deps_cpp_info.exelinkflags)
    arch_flag = architecture_flag(compiler=self._compiler, os=self._os, arch=self._arch)
    if arch_flag:
        ret.append(arch_flag)

    sysf = sysroot_flag(self._deps_cpp_info.sysroot, win_bash=self._win_bash,
                        subsystem=self.subsystem,
                        compiler=self._compiler)
    if sysf:
        ret.append(sysf)

    if self._include_rpath_flags:
        # rpath flags depend on the *build* OS when cross-building.
        the_os = self._conanfile.settings.get_safe("os_build") or self._os
        ret.extend(rpath_flags(the_os, self._compiler, self._deps_cpp_info.lib_paths))
    return ret
def _configure_flags(self):
    """Flags shared by C and C++ compilations: dependency cflags, the
    architecture flag, build-type flags, sysroot and the compiler
    runtime selector."""
    ret = copy.copy(self._deps_cpp_info.cflags)
    arch_flag = architecture_flag(compiler=self._compiler, os=self._os, arch=self._arch)
    if arch_flag:
        ret.append(arch_flag)
    btfs = build_type_flags(compiler=self._compiler, build_type=self._build_type,
                            vs_toolset=self._conanfile.settings.get_safe("compiler.toolset"))
    if btfs:
        ret.extend(btfs)
    srf = sysroot_flag(self._deps_cpp_info.sysroot, win_bash=self._win_bash,
                       subsystem=self.subsystem,
                       compiler=self._compiler)
    if srf:
        ret.append(srf)
    if self._compiler_runtime:
        # e.g. "-MT"/"-MD" — presumably the MSVC runtime; confirm.
        ret.append("-%s" % self._compiler_runtime)
    return ret
def _configure_cxx_flags(self):
    """C++-only flags: dependency cxxflags plus the libcxx selector."""
    flags = copy.copy(self._deps_cpp_info.cxxflags)
    libcxx = libcxx_flag(compiler=self._compiler, libcxx=self._libcxx)
    if libcxx:
        flags.append(libcxx)
    return flags
def _configure_defines(self):
    """Preprocessor defines: dependency defines, the build-type define
    and the libcxx ABI define."""
    # requires declared defines
    ret = copy.copy(self._deps_cpp_info.defines)

    # Debug definition for GCC
    btf = build_type_define(build_type=self._build_type)
    if btf:
        ret.append(btf)

    # CXX11 ABI
    abif = libcxx_define(compiler=self._compiler, libcxx=self._libcxx)
    if abif:
        ret.append(abif)
    return ret
def _get_vars(self):
    """Assemble the raw flag lists, returned as
    (ld_flags, cpp_flags, libs, cxx_flags, c_flags)."""
    def append(*args):
        # Flatten a mix of lists and scalars, dropping falsy entries.
        ret = []
        for arg in args:
            if arg:
                if isinstance(arg, list):
                    ret.extend(arg)
                else:
                    ret.append(arg)
        return ret

    lib_paths = format_library_paths(self.library_paths, win_bash=self._win_bash,
                                     subsystem=self.subsystem, compiler=self._compiler)
    include_paths = format_include_paths(self.include_paths, win_bash=self._win_bash,
                                         subsystem=self.subsystem, compiler=self._compiler)

    ld_flags = append(self.link_flags, lib_paths)
    cpp_flags = append(include_paths, format_defines(self.defines))
    libs = format_libraries(self.libs, compiler=self._compiler)

    tmp_compilation_flags = copy.copy(self.flags)
    if self.fpic:
        tmp_compilation_flags.append(pic_flag(self._compiler))

    # cxx_flags copies the shared compilation flags and adds C++-only
    # ones; c_flags aliases the shared list directly.
    cxx_flags = append(tmp_compilation_flags, self.cxx_flags, self.cppstd_flag)
    c_flags = tmp_compilation_flags

    return ld_flags, cpp_flags, libs, cxx_flags, c_flags
@property
def vars_dict(self):
    """Autotools variables as a dict of token lists.

    Values already exported in the current environment (CPPFLAGS,
    CXXFLAGS, CFLAGS, LDFLAGS, LIBS) are appended after the computed
    ones, so user settings win on conflicting options.
    """
    ld_flags, cpp_flags, libs, cxx_flags, c_flags = self._get_vars()
    # Single environment lookup per variable (the original read each
    # one twice) and one uniform append rule for all five.
    for flag_list, env_name in ((cpp_flags, "CPPFLAGS"),
                                (cxx_flags, "CXXFLAGS"),
                                (c_flags, "CFLAGS"),
                                (ld_flags, "LDFLAGS"),
                                (libs, "LIBS")):
        env_value = os.environ.get(env_name)
        if env_value:
            flag_list.append(env_value)
    return {"CPPFLAGS": cpp_flags,
            "CXXFLAGS": cxx_flags,
            "CFLAGS": c_flags,
            "LDFLAGS": ld_flags,
            "LIBS": libs
            }
@property
def vars(self):
    """Autotools variables as a dict of space-joined strings, with any
    pre-existing environment values appended after the computed ones."""
    ld_flags, cpp_flags, libs, cxx_flags, c_flags = self._get_vars()

    cpp_flags = " ".join(cpp_flags) + _environ_value_prefix("CPPFLAGS")
    cxx_flags = " ".join(cxx_flags) + _environ_value_prefix("CXXFLAGS")
    cflags = " ".join(c_flags) + _environ_value_prefix("CFLAGS")
    ldflags = " ".join(ld_flags) + _environ_value_prefix("LDFLAGS")
    libs = " ".join(libs) + _environ_value_prefix("LIBS")

    ret = {"CPPFLAGS": cpp_flags.strip(),
           "CXXFLAGS": cxx_flags.strip(),
           "CFLAGS": cflags.strip(),
           "LDFLAGS": ldflags.strip(),
           "LIBS": libs.strip()
           }
    return ret
def _environ_value_prefix(var_name, prefix=" "):
if os.environ.get(var_name, ""):
return "%s%s" % (prefix, os.environ.get(var_name, ""))
else:
return ""
| |
# testing/exclusions.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""NOTE: copied/adapted from SQLAlchemy master for backwards compatibility;
this should be removable when Alembic targets SQLAlchemy 1.0.0
"""
import operator
from .plugin.plugin_base import SkipTest
from sqlalchemy.util import decorator
from . import config
from sqlalchemy import util
from alembic import compat
import inspect
import contextlib
from .compat import get_url_driver_name, get_url_backend_name
def skip_if(predicate, reason=None):
    """Build a rule that skips the test when *predicate* holds."""
    rule = compound()
    rule.skips.add(_as_predicate(predicate, reason))
    return rule
def fails_if(predicate, reason=None):
    """Build a rule expecting the test to fail when *predicate* holds."""
    rule = compound()
    rule.fails.add(_as_predicate(predicate, reason))
    return rule
class compound(object):
    """Aggregate of skip rules, fail rules and tags for a test.

    Instances act as decorators on test functions; combining instances
    (via + / add) merges their rule sets into a new compound.
    """

    def __init__(self):
        self.fails = set()
        self.skips = set()
        self.tags = set()

    def __add__(self, other):
        return self.add(other)

    def add(self, *others):
        """Return a new compound merging this one with *others*."""
        copy = compound()
        copy.fails.update(self.fails)
        copy.skips.update(self.skips)
        copy.tags.update(self.tags)
        for other in others:
            copy.fails.update(other.fails)
            copy.skips.update(other.skips)
            copy.tags.update(other.tags)
        return copy

    def not_(self):
        """Return a new compound with every predicate negated."""
        copy = compound()
        copy.fails.update(NotPredicate(fail) for fail in self.fails)
        copy.skips.update(NotPredicate(skip) for skip in self.skips)
        copy.tags.update(self.tags)
        return copy

    @property
    def enabled(self):
        return self.enabled_for_config(config._current)

    def enabled_for_config(self, config):
        # Enabled only when no skip or fail predicate matches the config.
        for predicate in self.skips.union(self.fails):
            if predicate(config):
                return False
        else:
            return True

    def matching_config_reasons(self, config):
        """Describe every predicate that matches *config*."""
        return [
            predicate._as_string(config) for predicate
            in self.skips.union(self.fails)
            if predicate(config)
        ]

    def include_test(self, include_tags, exclude_tags):
        """True when this test's tags pass the include/exclude filters."""
        return bool(
            not self.tags.intersection(exclude_tags) and
            (not include_tags or self.tags.intersection(include_tags))
        )

    def _extend(self, other):
        # Fold *other*'s rules into self in place.
        self.skips.update(other.skips)
        self.fails.update(other.fails)
        self.tags.update(other.tags)

    def __call__(self, fn):
        """Decorate *fn* so the rules are applied when it runs."""
        if hasattr(fn, '_sa_exclusion_extend'):
            # Already decorated: merge our rules into the existing set.
            fn._sa_exclusion_extend._extend(self)
            return fn

        @decorator
        def decorate(fn, *args, **kw):
            return self._do(config._current, fn, *args, **kw)
        decorated = decorate(fn)
        decorated._sa_exclusion_extend = self
        return decorated

    @contextlib.contextmanager
    def fail_if(self):
        """Context manager treating all rules as expected failures."""
        all_fails = compound()
        all_fails.fails.update(self.skips.union(self.fails))

        try:
            yield
        except Exception as ex:
            all_fails._expect_failure(config._current, ex)
        else:
            all_fails._expect_success(config._current)

    def _do(self, config, fn, *args, **kw):
        # Skip for matching skip rules, then run the test and reconcile
        # the outcome with the expected-failure rules.
        for skip in self.skips:
            if skip(config):
                msg = "'%s' : %s" % (
                    fn.__name__,
                    skip._as_string(config)
                )
                raise SkipTest(msg)

        try:
            return_value = fn(*args, **kw)
        except Exception as ex:
            self._expect_failure(config, ex, name=fn.__name__)
        else:
            self._expect_success(config, name=fn.__name__)
            return return_value

    def _expect_failure(self, config, ex, name='block'):
        # Swallow the exception only when a fail rule matches; otherwise
        # re-raise preserving the original cause.
        for fail in self.fails:
            if fail(config):
                print(("%s failed as expected (%s): %s " % (
                    name, fail._as_string(config), str(ex))))
                break
        else:
            compat.raise_from_cause(ex)

    def _expect_success(self, config, name='block'):
        if not self.fails:
            return
        # Error out only when every fail rule predicted a failure.
        for fail in self.fails:
            if not fail(config):
                break
        else:
            raise AssertionError(
                "Unexpected success for '%s' (%s)" %
                (
                    name,
                    " and ".join(
                        fail._as_string(config)
                        for fail in self.fails
                    )
                )
            )
def requires_tag(tagname):
    """Shorthand for a single-tag compound."""
    return tags((tagname,))


def tags(tagnames):
    """Build a compound carrying only *tagnames* (no skip/fail rules)."""
    rule = compound()
    rule.tags.update(tagnames)
    return rule
def only_if(predicate, reason=None):
    """Skip unless *predicate* holds (the inverse of skip_if)."""
    return skip_if(NotPredicate(_as_predicate(predicate)), reason)


def succeeds_if(predicate, reason=None):
    """Expect failure unless *predicate* holds (inverse of fails_if)."""
    return fails_if(NotPredicate(_as_predicate(predicate)), reason)
class Predicate(object):
    """Base class for exclusion predicates evaluated against a config."""

    @classmethod
    def as_predicate(cls, predicate, description=None):
        """Coerce *predicate* (compound, Predicate, list/set, tuple,
        string spec like "mysql >= 5.6", or callable) into a Predicate."""
        if isinstance(predicate, compound):
            return cls.as_predicate(predicate.fails.union(predicate.skips))
        elif isinstance(predicate, Predicate):
            if description and predicate.description is None:
                predicate.description = description
            return predicate
        elif isinstance(predicate, (list, set)):
            return OrPredicate(
                [cls.as_predicate(pred) for pred in predicate],
                description)
        elif isinstance(predicate, tuple):
            return SpecPredicate(*predicate)
        elif isinstance(predicate, compat.string_types):
            # Parse "db", "db op version" or "db+driver" style specs.
            tokens = predicate.split(" ", 2)
            op = spec = None
            db = tokens.pop(0)
            if tokens:
                op = tokens.pop(0)
            if tokens:
                spec = tuple(int(d) for d in tokens.pop(0).split("."))
            return SpecPredicate(db, op, spec, description=description)
        elif util.callable(predicate):
            return LambdaPredicate(predicate, description)
        else:
            assert False, "unknown predicate type: %s" % predicate

    def _format_description(self, config, negate=False):
        bool_ = self(config)
        if negate:
            # Fix: flip the evaluated result.  The previous code read
            # "bool_ = not negate", which is always False here and made
            # negated descriptions report the wrong support text.
            bool_ = not bool_
        return self.description % {
            "driver": get_url_driver_name(config.db.url),
            "database": get_url_backend_name(config.db.url),
            "doesnt_support": "doesn't support" if bool_ else "does support",
            "does_support": "does support" if bool_ else "doesn't support"
        }

    def _as_string(self, config=None, negate=False):
        raise NotImplementedError()
class BooleanPredicate(Predicate):
    """Predicate with a fixed truth value, independent of the config."""

    def __init__(self, value, description=None):
        self.value = value
        if not description:
            description = "boolean %s" % value
        self.description = description

    def __call__(self, config):
        return self.value

    def _as_string(self, config, negate=False):
        return self._format_description(config, negate=negate)
class SpecPredicate(Predicate):
    """Predicate matching a database name (optionally "db+driver") and an
    optional server-version comparison, e.g. ("mysql", ">=", (5, 6))."""

    def __init__(self, db, op=None, spec=None, description=None):
        self.db = db
        self.op = op
        self.spec = spec
        self.description = description

    # Comparison operators accepted in string/tuple specs.
    _ops = {
        '<': operator.lt,
        '>': operator.gt,
        '==': operator.eq,
        '!=': operator.ne,
        '<=': operator.le,
        '>=': operator.ge,
        'in': operator.contains,
        'between': lambda val, pair: val >= pair[0] and val <= pair[1],
    }

    def __call__(self, config):
        engine = config.db

        if "+" in self.db:
            dialect, driver = self.db.split('+')
        else:
            dialect, driver = self.db, None

        if dialect and engine.name != dialect:
            return False
        if driver is not None and engine.driver != driver:
            return False

        if self.op is not None:
            assert driver is None, "DBAPI version specs not supported yet"

            version = _server_version(engine)
            # self.op may itself be a callable comparator.
            oper = hasattr(self.op, '__call__') and self.op \
                or self._ops[self.op]
            return oper(version, self.spec)
        else:
            return True

    def _as_string(self, config, negate=False):
        if self.description is not None:
            return self._format_description(config)
        elif self.op is None:
            if negate:
                return "not %s" % self.db
            else:
                return "%s" % self.db
        else:
            if negate:
                return "not %s %s %s" % (
                    self.db,
                    self.op,
                    self.spec
                )
            else:
                return "%s %s %s" % (
                    self.db,
                    self.op,
                    self.spec
                )
class LambdaPredicate(Predicate):
    """Predicate backed by a user-supplied callable.

    Zero-argument callables are wrapped so they can still be invoked
    with the config argument; the callable's docstring doubles as the
    description when none is given.
    """

    def __init__(self, lambda_, description=None, args=None, kw=None):
        # inspect.getargspec was removed in Python 3.11; prefer
        # getfullargspec when available (spec[0] is the arg-name list
        # in both APIs), keeping Python 2 compatibility.
        getargspec = getattr(inspect, "getfullargspec", None) or inspect.getargspec
        spec = getargspec(lambda_)
        if not spec[0]:
            # No positional parameters: ignore the config argument.
            self.lambda_ = lambda db: lambda_()
        else:
            self.lambda_ = lambda_
        self.args = args or ()
        self.kw = kw or {}
        if description:
            self.description = description
        elif lambda_.__doc__:
            self.description = lambda_.__doc__
        else:
            self.description = "custom function"

    def __call__(self, config):
        return self.lambda_(config)

    def _as_string(self, config, negate=False):
        return self._format_description(config)
class NotPredicate(Predicate):
    """Logical negation of a wrapped predicate."""

    def __init__(self, predicate, description=None):
        self.predicate = predicate
        self.description = description

    def __call__(self, config):
        return not self.predicate(config)

    def _as_string(self, config, negate=False):
        # Describing a negation = describing the inner predicate with
        # the negation flag flipped.
        if self.description:
            return self._format_description(config, not negate)
        return self.predicate._as_string(config, not negate)
class OrPredicate(Predicate):
    """Disjunction: true when any member predicate matches."""

    def __init__(self, predicates, description=None):
        self.predicates = predicates
        self.description = description

    def __call__(self, config):
        for pred in self.predicates:
            if pred(config):
                return True
        return False

    def _eval_str(self, config, negate=False):
        # De Morgan: a negated OR reads as an AND of negated members.
        if negate:
            conjunction = " and "
        else:
            conjunction = " or "
        return conjunction.join(p._as_string(config, negate=negate)
                                for p in self.predicates)

    def _negation_str(self, config):
        if self.description is not None:
            return "Not " + self._format_description(config)
        else:
            return self._eval_str(config, negate=True)

    def _as_string(self, config, negate=False):
        if negate:
            return self._negation_str(config)
        else:
            if self.description is not None:
                return self._format_description(config)
            else:
                return self._eval_str(config)
_as_predicate = Predicate.as_predicate


def _is_excluded(db, op, spec):
    """Evaluate a (db, op, spec) rule against the current config."""
    predicate = SpecPredicate(db, op, spec)
    return predicate(config._current)
def _server_version(engine):
"""Return a server_version_info tuple."""
# force metadata to be retrieved
conn = engine.connect()
version = getattr(engine.dialect, 'server_version_info', ())
conn.close()
return version
def db_spec(*dbs):
    """OR-combination of one predicate per database spec."""
    predicates = [Predicate.as_predicate(db) for db in dbs]
    return OrPredicate(predicates)
def open():  # noqa - shadows the builtin, but the name is public API
    """Rule that never skips; the test always executes."""
    return skip_if(BooleanPredicate(False, "mark as execute"))


def closed():
    """Rule that always skips the test."""
    return skip_if(BooleanPredicate(True, "marked as skip"))


def fails():
    """Rule that always expects the test to fail."""
    return fails_if(BooleanPredicate(True, "expected to fail"))
@decorator
def future(fn, *arg):
    """Mark the decorated test as a future feature (expected to fail)."""
    return fails_if(LambdaPredicate(fn), "Future feature")
def fails_on(db, reason=None):
    """Expect failure on the given database spec."""
    return fails_if(SpecPredicate(db), reason)


def fails_on_everything_except(*dbs):
    """Expect failure on every backend except those listed."""
    return succeeds_if(
        OrPredicate([
            SpecPredicate(db) for db in dbs
        ])
    )


def skip(db, reason=None):
    """Skip on the given database spec."""
    return skip_if(SpecPredicate(db), reason)


def only_on(dbs, reason=None):
    """Run only on the listed backend spec(s)."""
    return only_if(
        OrPredicate([SpecPredicate(db) for db in util.to_list(dbs)])
    )


def exclude(db, op, spec, reason=None):
    """Skip when the backend matches (db, op, spec),
    e.g. exclude("mysql", "<", (5, 0))."""
    return skip_if(SpecPredicate(db, op, spec), reason)
def against(config, *queries):
    """Evaluate one or more database specs against *config* as an OR."""
    assert queries, "no queries sent!"
    predicates = [Predicate.as_predicate(query) for query in queries]
    return OrPredicate(predicates)(config)
| |
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""LARS optimizer."""
from __future__ import absolute_import
import numpy
from ..ndarray import (zeros, clip, array,
multi_sum_sq, multi_lars,
norm as NDnorm,
where, ones_like)
from ..ndarray import (sgd_update, sgd_mom_update,
mp_sgd_update, mp_sgd_mom_update,
preloaded_multi_sgd_update, preloaded_multi_sgd_mom_update,
preloaded_multi_mp_sgd_update, preloaded_multi_mp_sgd_mom_update)
from .optimizer import Optimizer, register
from .utils import _flatten_list
__all__ = ['LARS']
@register
class LARS(Optimizer):
    """the LARS optimizer from 'Large Batch Training of Convolution Networks' \
    (https://arxiv.org/abs/1708.03888)

    Behave mostly like SGD with momentum and weight decay but is scaling \
    adaptively the learning for each layer:

    .. code-block::

        w_norm = L2norm(weights)
        g_norm = L2norm(gradients)
        if w_norm > 0 and g_norm > 0:
            lr_layer = lr * w_norm / (g_norm + weight_decay * w_norm + epsilon)
        else:
            lr_layer = lr

    Parameters
    ----------
    learning_rate : float, default 0.1
        The initial learning rate. If None, the optimization will use the
        learning rate from ``lr_scheduler``. If not None, it will overwrite
        the learning rate in ``lr_scheduler``. If None and ``lr_scheduler``
        is also None, then it will be set to 0.01 by default.
    momentum : float, default 0.
        The momentum value.
    eta : float, default 0.001
        LARS coefficient used to scale the learning rate.
    epsilon : float, default 1e-8
        Small value to avoid division by 0.
    lazy_update : bool, default False
        Default is False. If True, lazy updates are applied \
        if the storage types of weight and grad are both ``row_sparse``.
    aggregate_num : int, default 1
        Number of weights to be aggregated in a list.
        They are passed to the optimizer for a single optimization step.
    use_fused_step : bool, default True
        Whether or not to use fused kernels for optimizer.
        When use_fused_step=False, step is called,
        otherwise, fused_step is called.
    """

    def __init__(self, learning_rate=0.1, momentum=0.0, eta=0.001,
                 epsilon=1e-8, lazy_update=False, use_fused_step=True,
                 aggregate_num=1, **kwargs):
        super(LARS, self).__init__(learning_rate=learning_rate,
                                   use_fused_step=use_fused_step,
                                   aggregate_num=aggregate_num,
                                   **kwargs)
        if not self.use_fused_step:
            assert not lazy_update,\
                'When use_fused_step is set to False, lazy_update has to be turned off.'
        if lazy_update:
            assert not self.multi_precision, \
                'When lazy_update is set to True, multi_precision has be turned off.'
        # Fix: the original assigned self.lazy_update twice; once suffices.
        self.lazy_update = lazy_update
        self.momentum = momentum
        self.eta = eta
        self.epsilon = epsilon

    def create_state(self, index, weight):
        """Create the momentum buffer for `weight` (None when momentum == 0)."""
        momentum = None
        if self.momentum != 0.0:
            stype = weight.stype if self.lazy_update else 'default'
            momentum = zeros(weight.shape, weight.context, dtype=weight.dtype, stype=stype)
        return momentum

    def _l2norm(self, v, rescale=False):
        """L2 Norm implementation"""
        v = v.astype('float32')
        if rescale:
            v *= self.rescale_grad
        norm = NDnorm(v)
        return norm

    def _get_lars(self, index, weight, grad, wd):
        """Returns a scaling factor for the learning rate for this layer"""
        lars = 1.0
        name = self.idx2name[index] if index in self.idx2name else str(index)
        # Batch-norm parameters and biases keep the plain learning rate.
        if name.endswith('gamma') or name.endswith('beta') or name.endswith('bias'):
            return lars

        w_norm = self._l2norm(weight)
        g_norm = self._l2norm(grad, rescale=True)

        # calculate lars_trust_ratio
        ratio = w_norm / g_norm
        # nan_or_zero is NaN whenever ratio is 0, inf or NaN (one of the
        # norms zero or non-finite), otherwise exactly 0.  NaN compares
        # non-zero, so `where` then selects the fallback scale of 1.0.
        nan_or_zero = 1 - ratio / ratio
        lars = self.eta * w_norm / (g_norm + wd * w_norm + self.epsilon)
        lars = where(nan_or_zero, ones_like(lars), lars)

        return lars.asscalar()

    def step(self, indices, weights, grads, states):
        """Perform an optimization step using gradients and states.

        Parameters
        ----------
        indices : list of int
            List of unique indices of the parameters into the individual learning rates
            and weight decays. Learning rates and weight decay may be set via `set_lr_mult()`
            and `set_wd_mult()`, respectively.
        weights : list of NDArray
            List of parameters to be updated.
        grads : list of NDArray
            List of gradients of the objective with respect to this parameter.
        states : List of any obj
            List of state returned by `create_state()`.
        """
        for index, weight, grad, state in zip(indices, weights, grads, states):
            self._update_count(index)
            lr = self._get_lr(index)
            wd = self._get_wd(index)

            # compute lars
            # clip grad + wd * weight is performed after computing lars
            lars = self._get_lars(index, weight, grad, wd)
            lr *= lars

            # preprocess grad
            grad *= self.rescale_grad
            if self.clip_gradient is not None:
                grad = clip(grad, -self.clip_gradient, self.clip_gradient)
            grad += wd * weight

            # update mom
            mom = state
            if mom is not None:
                mom[:] *= self.momentum
                mom[:] -= lr * grad
            else:
                mom = -lr * grad

            # update weight
            weight[:] += mom

    def fused_step(self, indices, weights, grads, states):
        """Perform a fused optimization step using gradients and states.
        Fused kernel is used for update.

        Parameters
        ----------
        indices : list of int
            List of unique indices of the parameters into the individual learning rates
            and weight decays. Learning rates and weight decay may be set via `set_lr_mult()`
            and `set_wd_mult()`, respectively.
        weights : list of NDArray
            List of parameters to be updated.
        grads : list of NDArray
            List of gradients of the objective with respect to this parameter.
        states : List of any obj
            List of state returned by `create_state()`.
        """
        # Aggregated (multi-tensor) kernels require dense storage.
        aggregate = self.aggregate_num > 1
        for weight, grad in zip(weights, grads):
            aggregate = (aggregate and
                         weight.stype == 'default' and
                         grad.stype == 'default')
        self._update_count(indices)
        lrs = self._get_lrs(indices)
        wds = self._get_wds(indices)

        kwargs = {'rescale_grad': self.rescale_grad}
        if self.momentum > 0:
            kwargs['momentum'] = self.momentum
        if self.clip_gradient is not None:
            kwargs['clip_gradient'] = self.clip_gradient

        if aggregate:
            nb_params = len(indices)
            names = [self.idx2name[i] if i in self.idx2name else str(i) for i in indices]
            # Partition parameters: LARS-scaled first, then bn/bias ones.
            lars_idx = [i for i in range(nb_params) if
                        not(names[i].endswith('gamma') or names[i].endswith('beta') or
                            names[i].endswith('bias'))]
            nb_lars = len(lars_idx)
            no_lars_idx = [i for i in range(nb_params) if
                           (names[i].endswith('gamma') or names[i].endswith('beta') or
                            names[i].endswith('bias'))]
            cur_ctx = weights[0].context
            full_idx = lars_idx + no_lars_idx
            new_lrs = array([lrs[i] for i in full_idx], ctx=cur_ctx, dtype='float32')
            new_wds = array([wds[i] for i in full_idx], ctx=cur_ctx, dtype='float32')
            new_weights = [weights[i] for i in full_idx]
            new_grads = [grads[i] for i in full_idx]
            new_states = [states[i] for i in full_idx]
            if nb_lars > 0:
                # Compute all trust ratios in one fused kernel.
                w_sum_sq = multi_sum_sq(*new_weights[:nb_lars], num_arrays=nb_lars)
                g_sum_sq = multi_sum_sq(*new_grads[:nb_lars], num_arrays=nb_lars)
                multi_lars(new_lrs[:nb_lars], w_sum_sq, g_sum_sq, new_wds[:nb_lars],
                           eta=self.eta, eps=self.epsilon, rescale_grad=self.rescale_grad,
                           out=new_lrs[:nb_lars])
            # Same than usual using preloaded sgd functions
            multi_precision = self.multi_precision and weights[0].dtype == numpy.float16
            if not multi_precision:
                if self.momentum > 0:
                    preloaded_multi_sgd_mom_update(
                        *(_flatten_list(zip(new_weights, new_grads, new_states)) +
                          [new_lrs, new_wds]), out=new_weights, num_weights=len(new_weights),
                        **kwargs)
                else:
                    preloaded_multi_sgd_update(
                        *(_flatten_list(zip(new_weights, new_grads)) +
                          [new_lrs, new_wds]), out=new_weights, num_weights=len(new_weights),
                        **kwargs)
            else:
                # NOTE(review): this branch unzips the *original* `states`
                # ordering while the weights were reordered by full_idx —
                # verify against upstream whether new_states was intended.
                states = list(zip(*states))
                weights32, moms = states
                if self.momentum > 0:
                    preloaded_multi_mp_sgd_mom_update(
                        *(_flatten_list(zip(new_weights, new_grads, moms, weights32)) +
                          [new_lrs, new_wds]), out=new_weights, num_weights=len(new_weights),
                        **kwargs)
                else:
                    preloaded_multi_mp_sgd_update(
                        *(_flatten_list(zip(new_weights, new_grads, weights32)) +
                          [new_lrs, new_wds]), out=new_weights, num_weights=len(new_weights),
                        **kwargs)
        else:
            for i, (index, weight, grad, state) in enumerate(zip(indices, weights, grads, states)):
                wd = wds[i]
                lr = lrs[i]
                lr *= self._get_lars(index, weight, grad, wd)
                multi_precision = self.multi_precision and weights[0].dtype == numpy.float16
                if not multi_precision:
                    mom = state
                    if state is not None:
                        sgd_mom_update(weight, grad, mom, out=weight,
                                       lazy_update=self.lazy_update, lr=lr, wd=wd, **kwargs)
                    else:
                        sgd_update(weight, grad, out=weight, lazy_update=self.lazy_update,
                                   lr=lr, wd=wd, **kwargs)
                else:
                    weight32, mom = state
                    if mom is not None:
                        mp_sgd_mom_update(weight, grad, mom, weight32, out=weight,
                                          lr=lr, wd=wd, **kwargs)
                    else:
                        mp_sgd_update(weight, grad, weight32, out=weight,
                                      lr=lr, wd=wd, **kwargs)

    def update_multi_precision(self, indices, weights, grads, states):
        """Override update_multi_precision.
        """
        if self.use_fused_step:
            # Fused kernels handle mixed precision internally.
            self.update(indices, weights, grads, states)
        else:
            super(LARS, self).update_multi_precision(indices, weights, grads, states)
| |
# coding=utf-8
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for :py:class:`ironic.conductor.rpcapi.ConductorAPI`.
"""
import copy
import mock
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_messaging import _utils as messaging_utils
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common import states
from ironic.conductor import manager as conductor_manager
from ironic.conductor import rpcapi as conductor_rpcapi
from ironic import objects
from ironic.tests import base as tests_base
from ironic.tests.unit.db import base
from ironic.tests.unit.db import utils as dbutils
CONF = cfg.CONF
class ConductorRPCAPITestCase(tests_base.TestCase):
    """Sanity checks on the conductor RPC API surface."""

    def test_versions_in_sync(self):
        # The client-side API version must match the manager's.
        self.assertEqual(
            conductor_manager.ConductorManager.RPC_API_VERSION,
            conductor_rpcapi.ConductorAPI.RPC_API_VERSION)
class RPCAPITestCase(base.DbTestCase):
def setUp(self):
    """Build a fake node (db row + object form) and a portgroup fixture."""
    super(RPCAPITestCase, self).setUp()
    self.fake_node = dbutils.get_test_node(driver='fake-driver')
    self.fake_node_obj = objects.Node._from_db_object(
        objects.Node(self.context), self.fake_node)
    self.fake_portgroup = dbutils.get_test_portgroup()
def test_serialized_instance_has_uuid(self):
    """The test node fixture must carry a uuid for serialization."""
    # assertIn gives a clearer failure message than assertTrue('x' in y).
    self.assertIn('uuid', self.fake_node)
def test_get_topic_for_known_driver(self):
    """get_topic_for returns a host-scoped topic when a conductor
    registered the node's driver."""
    CONF.set_override('host', 'fake-host')
    self.dbapi.register_conductor({'hostname': 'fake-host',
                                   'drivers': ['fake-driver']})

    rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
    expected_topic = 'fake-topic.fake-host'
    self.assertEqual(expected_topic,
                     rpcapi.get_topic_for(self.fake_node_obj))
def test_get_topic_for_unknown_driver(self):
    """get_topic_for raises NoValidHost when no conductor has the driver."""
    CONF.set_override('host', 'fake-host')
    self.dbapi.register_conductor({'hostname': 'fake-host',
                                   'drivers': ['other-driver']})

    rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
    self.assertRaises(exception.NoValidHost,
                      rpcapi.get_topic_for,
                      self.fake_node_obj)
def test_get_topic_doesnt_cache(self):
    """Topic lookup must see conductors registered after a failed lookup."""
    CONF.set_override('host', 'fake-host')

    rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
    self.assertRaises(exception.NoValidHost,
                      rpcapi.get_topic_for,
                      self.fake_node_obj)

    self.dbapi.register_conductor({'hostname': 'fake-host',
                                   'drivers': ['fake-driver']})

    rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
    expected_topic = 'fake-topic.fake-host'
    self.assertEqual(expected_topic,
                     rpcapi.get_topic_for(self.fake_node_obj))
def test_get_topic_for_driver_known_driver(self):
    """get_topic_for_driver returns the registered conductor's topic."""
    CONF.set_override('host', 'fake-host')
    self.dbapi.register_conductor({
        'hostname': 'fake-host',
        'drivers': ['fake-driver'],
    })

    rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
    self.assertEqual('fake-topic.fake-host',
                     rpcapi.get_topic_for_driver('fake-driver'))
def test_get_topic_for_driver_unknown_driver(self):
    """get_topic_for_driver raises DriverNotFound when no conductor
    registered the driver."""
    CONF.set_override('host', 'fake-host')
    self.dbapi.register_conductor({
        'hostname': 'fake-host',
        'drivers': ['other-driver'],
    })

    rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
    self.assertRaises(exception.DriverNotFound,
                      rpcapi.get_topic_for_driver,
                      'fake-driver')
def test_get_topic_for_driver_doesnt_cache(self):
    """A failed driver-topic lookup is not cached across registrations."""
    CONF.set_override('host', 'fake-host')

    # First lookup fails: nothing registered for this driver yet.
    rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
    self.assertRaises(exception.DriverNotFound,
                      rpcapi.get_topic_for_driver,
                      'fake-driver')

    # Register a supporting conductor and verify a fresh client succeeds.
    self.dbapi.register_conductor({
        'hostname': 'fake-host',
        'drivers': ['fake-driver'],
    })
    rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
    self.assertEqual('fake-topic.fake-host',
                     rpcapi.get_topic_for_driver('fake-driver'))
def _test_rpcapi(self, method, rpc_method, **kwargs):
    """Drive one ConductorAPI RPC and verify topic, version and payload.

    Patches the messaging client so no real RPC happens, then invokes
    ``rpcapi.<method>(context, **kwargs)`` and checks what would have
    been sent over the wire.

    :param method: name of the ConductorAPI method to exercise.
    :param rpc_method: 'call' or 'cast'.
    :param kwargs: arguments forwarded to the API method; the special
        keys 'version' and 'host' parameterize the expected target.
    """
    rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
    # 'call' RPCs return a value; 'cast' RPCs return None.
    expected_retval = 'hello world' if rpc_method == 'call' else None
    expected_topic = 'fake-topic'
    if 'host' in kwargs:
        expected_topic += ".%s" % kwargs['host']
    target = {
        "topic": expected_topic,
        # 'version' is consumed here; it is not part of the payload.
        "version": kwargs.pop('version', rpcapi.RPC_API_VERSION)
    }
    expected_msg = copy.deepcopy(kwargs)

    self.fake_args = None
    self.fake_kwargs = None

    def _fake_can_send_version_method(version):
        # Emulate the client's version negotiation against our pinned
        # RPC API version.
        return messaging_utils.version_is_compatible(
            rpcapi.RPC_API_VERSION, version)

    def _fake_prepare_method(*args, **kwargs):
        # prepare() must receive exactly the expected target fields.
        for kwd in kwargs:
            self.assertEqual(kwargs[kwd], target[kwd])
        return rpcapi.client

    def _fake_rpc_method(*args, **kwargs):
        # Capture the call/cast arguments for inspection below.
        self.fake_args = args
        self.fake_kwargs = kwargs
        if expected_retval:
            return expected_retval

    with mock.patch.object(rpcapi.client,
                           "can_send_version") as mock_can_send_version:
        mock_can_send_version.side_effect = _fake_can_send_version_method
        with mock.patch.object(rpcapi.client, "prepare") as mock_prepared:
            mock_prepared.side_effect = _fake_prepare_method
            with mock.patch.object(rpcapi.client,
                                   rpc_method) as mock_method:
                mock_method.side_effect = _fake_rpc_method
                retval = getattr(rpcapi, method)(self.context, **kwargs)
                self.assertEqual(retval, expected_retval)
                expected_args = [self.context, method, expected_msg]
                # NOTE(review): zip() stops at the shorter sequence, so a
                # missing trailing argument would not fail this loop —
                # confirm the leniency is intentional.
                for arg, expected_arg in zip(self.fake_args,
                                             expected_args):
                    self.assertEqual(arg, expected_arg)
def test_update_node(self):
    """update_node is a 'call' RPC pinned at version 1.1."""
    self._test_rpcapi('update_node',
                      'call',
                      version='1.1',
                      node_obj=self.fake_node)
def test_change_node_power_state(self):
    """change_node_power_state is a 'call' RPC pinned at version 1.6."""
    self._test_rpcapi('change_node_power_state',
                      'call',
                      version='1.6',
                      node_id=self.fake_node['uuid'],
                      new_state=states.POWER_ON)
def test_vendor_passthru(self):
    """vendor_passthru is a 'call' RPC pinned at version 1.20."""
    self._test_rpcapi('vendor_passthru',
                      'call',
                      version='1.20',
                      node_id=self.fake_node['uuid'],
                      driver_method='test-driver-method',
                      http_method='test-http-method',
                      info={"test_info": "test_value"})
def test_driver_vendor_passthru(self):
    """driver_vendor_passthru is a 'call' RPC pinned at version 1.20."""
    self._test_rpcapi('driver_vendor_passthru',
                      'call',
                      version='1.20',
                      driver_name='test-driver-name',
                      driver_method='test-driver-method',
                      http_method='test-http-method',
                      info={'test_key': 'test_value'})
def test_do_node_deploy(self):
    """do_node_deploy is a 'call' RPC pinned at version 1.22."""
    self._test_rpcapi('do_node_deploy',
                      'call',
                      version='1.22',
                      node_id=self.fake_node['uuid'],
                      rebuild=False,
                      configdrive=None)
def test_do_node_tear_down(self):
    """do_node_tear_down is a 'call' RPC pinned at version 1.6."""
    self._test_rpcapi('do_node_tear_down',
                      'call',
                      version='1.6',
                      node_id=self.fake_node['uuid'])
def test_validate_driver_interfaces(self):
    """validate_driver_interfaces is a 'call' RPC pinned at version 1.5."""
    self._test_rpcapi('validate_driver_interfaces',
                      'call',
                      version='1.5',
                      node_id=self.fake_node['uuid'])
def test_destroy_node(self):
    """destroy_node is a 'call' RPC pinned at version 1.9."""
    self._test_rpcapi('destroy_node',
                      'call',
                      version='1.9',
                      node_id=self.fake_node['uuid'])
def test_get_console_information(self):
    """get_console_information is a 'call' RPC pinned at version 1.11."""
    self._test_rpcapi('get_console_information',
                      'call',
                      version='1.11',
                      node_id=self.fake_node['uuid'])
def test_set_console_mode(self):
    """set_console_mode is a 'call' RPC pinned at version 1.11."""
    self._test_rpcapi('set_console_mode',
                      'call',
                      version='1.11',
                      node_id=self.fake_node['uuid'],
                      enabled=True)
def test_update_port(self):
    """update_port is a 'call' RPC pinned at version 1.13."""
    fake_port = dbutils.get_test_port()
    self._test_rpcapi('update_port',
                      'call',
                      version='1.13',
                      port_obj=fake_port)
def test_get_driver_properties(self):
    """get_driver_properties is a 'call' RPC pinned at version 1.16."""
    self._test_rpcapi('get_driver_properties',
                      'call',
                      version='1.16',
                      driver_name='fake-driver')
def test_set_boot_device(self):
    """set_boot_device is a 'call' RPC pinned at version 1.17."""
    self._test_rpcapi('set_boot_device',
                      'call',
                      version='1.17',
                      node_id=self.fake_node['uuid'],
                      device=boot_devices.DISK,
                      persistent=False)
def test_get_boot_device(self):
    """get_boot_device is a 'call' RPC pinned at version 1.17."""
    self._test_rpcapi('get_boot_device',
                      'call',
                      version='1.17',
                      node_id=self.fake_node['uuid'])
def test_get_supported_boot_devices(self):
    """get_supported_boot_devices is a 'call' RPC pinned at version 1.17."""
    self._test_rpcapi('get_supported_boot_devices',
                      'call',
                      version='1.17',
                      node_id=self.fake_node['uuid'])
def test_get_node_vendor_passthru_methods(self):
    """get_node_vendor_passthru_methods is a 'call' RPC at version 1.21."""
    self._test_rpcapi('get_node_vendor_passthru_methods',
                      'call',
                      version='1.21',
                      node_id=self.fake_node['uuid'])
def test_get_driver_vendor_passthru_methods(self):
    """get_driver_vendor_passthru_methods is a 'call' RPC at version 1.21."""
    self._test_rpcapi('get_driver_vendor_passthru_methods',
                      'call',
                      version='1.21',
                      driver_name='fake-driver')
def test_inspect_hardware(self):
    """inspect_hardware is a 'call' RPC pinned at version 1.24."""
    self._test_rpcapi('inspect_hardware',
                      'call',
                      version='1.24',
                      node_id=self.fake_node['uuid'])
def test_continue_node_clean(self):
    """continue_node_clean is a fire-and-forget 'cast' at version 1.27."""
    self._test_rpcapi('continue_node_clean',
                      'cast',
                      version='1.27',
                      node_id=self.fake_node['uuid'])
def test_get_raid_logical_disk_properties(self):
    """get_raid_logical_disk_properties is a 'call' RPC at version 1.30."""
    self._test_rpcapi('get_raid_logical_disk_properties',
                      'call',
                      version='1.30',
                      driver_name='fake-driver')
def test_set_target_raid_config(self):
    """set_target_raid_config is a 'call' RPC pinned at version 1.30."""
    self._test_rpcapi('set_target_raid_config',
                      'call',
                      version='1.30',
                      node_id=self.fake_node['uuid'],
                      target_raid_config='config')
def test_do_node_clean(self):
    """do_node_clean is a 'call' RPC pinned at version 1.32."""
    clean_steps = [{'step': 'upgrade_firmware', 'interface': 'deploy'},
                   {'step': 'upgrade_bmc', 'interface': 'management'}]
    self._test_rpcapi('do_node_clean',
                      'call',
                      version='1.32',
                      node_id=self.fake_node['uuid'],
                      clean_steps=clean_steps)
def test_object_action(self):
    """object_action is a 'call' RPC pinned at version 1.31."""
    self._test_rpcapi('object_action',
                      'call',
                      version='1.31',
                      objinst='fake-object',
                      objmethod='foo',
                      args=tuple(),
                      kwargs=dict())
def test_object_class_action_versions(self):
    """object_class_action_versions is a 'call' RPC at version 1.31."""
    self._test_rpcapi('object_class_action_versions',
                      'call',
                      version='1.31',
                      objname='fake-object',
                      objmethod='foo',
                      object_versions={'fake-object': '1.0'},
                      args=tuple(),
                      kwargs=dict())
def test_object_backport_versions(self):
    """object_backport_versions is a 'call' RPC pinned at version 1.31."""
    self._test_rpcapi('object_backport_versions',
                      'call',
                      version='1.31',
                      objinst='fake-object',
                      object_versions={'fake-object': '1.0'})
@mock.patch.object(messaging.RPCClient, 'can_send_version', autospec=True)
def test_object_action_invalid_version(self, mock_send):
    """object_action raises NotImplementedError if 1.31 is unsupported."""
    rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
    # Simulate a peer that cannot speak the required RPC version.
    mock_send.return_value = False
    self.assertRaises(NotImplementedError,
                      rpcapi.object_action, self.context,
                      objinst='fake-object', objmethod='foo',
                      args=tuple(), kwargs=dict())
@mock.patch.object(messaging.RPCClient, 'can_send_version', autospec=True)
def test_object_class_action_versions_invalid_version(self, mock_send):
    """object_class_action_versions raises when the version is unsupported."""
    rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
    # Simulate a peer that cannot speak the required RPC version.
    mock_send.return_value = False
    self.assertRaises(NotImplementedError,
                      rpcapi.object_class_action_versions, self.context,
                      objname='fake-object', objmethod='foo',
                      object_versions={'fake-object': '1.0'},
                      args=tuple(), kwargs=dict())
@mock.patch.object(messaging.RPCClient, 'can_send_version', autospec=True)
def test_object_backport_versions_invalid_version(self, mock_send):
    """object_backport_versions raises when the version is unsupported."""
    rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
    # Simulate a peer that cannot speak the required RPC version.
    mock_send.return_value = False
    self.assertRaises(NotImplementedError,
                      rpcapi.object_backport_versions, self.context,
                      objinst='fake-object',
                      object_versions={'fake-object': '1.0'})
def test_update_portgroup(self):
    """update_portgroup is a 'call' RPC pinned at version 1.33."""
    self._test_rpcapi('update_portgroup',
                      'call',
                      version='1.33',
                      portgroup_obj=self.fake_portgroup)
def test_destroy_portgroup(self):
    """destroy_portgroup is a 'call' RPC pinned at version 1.33."""
    self._test_rpcapi('destroy_portgroup',
                      'call',
                      version='1.33',
                      portgroup=self.fake_portgroup)
| |
import pyparsing as pp
from netlib import http
from netlib.http import user_agents, Headers
from . import base, message
"""
Normal HTTP requests:
<method>:<path>:<header>:<body>
e.g.:
GET:/
GET:/:h"foo"="bar"
POST:/:h"foo"="bar":b'content body payload'
Normal HTTP responses:
<code>:<header>:<body>
e.g.:
200
302:h"foo"="bar"
404:h"foo"="bar":b'content body payload'
Individual HTTP/2 frames:
h2f:<payload_length>:<type>:<flags>:<stream_id>:<payload>
e.g.:
h2f:0:PING
h2f:42:HEADERS:END_HEADERS:0x1234567:foo=bar,host=example.com
h2f:42:DATA:END_STREAM,PADDED:0x1234567:'content body payload'
"""
def get_header(val, headers):
    """Return the first header whose key matches *val* case-insensitively.

    Header keys may be Values, so each key is "generated" (with empty
    settings) before being compared. Returns None when nothing matches.
    """
    for header in headers:
        key = header.key.get_generator({})
        # Compare lengths first, then the lower-cased rendering of the key.
        if len(key) == len(val) and key[:].lower() == val.lower():
            return header
    return None
class _HeaderMixin(object):
    """Shared behaviour for tokens that render to a (key, value) header pair."""
    unique_name = None

    def values(self, settings):
        # Render both key and value through their generators so that
        # embedded Values are expanded with the given settings.
        return (
            self.key.get_generator(settings),
            self.value.get_generator(settings),
        )
class _HTTP2Message(message.Message):
    """Common base for HTTP/2 request and response specifications."""

    @property
    def actions(self):
        # Actions are not (yet) supported for HTTP/2 messages.
        return []  # self.toks(actions._Action)

    @property
    def headers(self):
        """All header tokens, with a content-length auto-added unless raw."""
        headers = self.toks(_HeaderMixin)

        if not self.raw:
            # NOTE(review): get_header is called with a bytes key while
            # Header keys below are built from str literals — confirm the
            # case-insensitive comparison matches on this code path.
            if not get_header(b"content-length", headers):
                if not self.body:
                    length = 0
                else:
                    length = len(self.body.string())
                headers.append(
                    Header(
                        base.TokValueLiteral("content-length"),
                        base.TokValueLiteral(str(length)),
                    )
                )
        return headers

    @property
    def raw(self):
        # A spec containing the "r" token suppresses auto-added headers.
        return bool(self.tok(Raw))

    @property
    def body(self):
        return self.tok(Body)

    def resolve(self, settings):
        # HTTP/2 messages need no further resolution.
        return self
class StatusCode(base.Integer):
    """HTTP status code token, e.g. the ``404`` in ``404:h"foo"="bar"``."""
    pass
class Method(base.OptionsOrValue):
    """HTTP request method: one of the common verbs or an arbitrary Value."""
    options = [
        "GET",
        "HEAD",
        "POST",
        "PUT",
        "DELETE",
    ]
class Path(base.Value):
    """Request path token (the ``/`` part of ``GET:/``)."""
    pass
class Header(_HeaderMixin, base.KeyValue):
    """Explicit header token: ``h"key"="value"``."""
    preamble = "h"
class ShortcutContentType(_HeaderMixin, base.Value):
    """Shortcut for a content-type header: ``c"text/html"``."""
    preamble = "c"
    key = base.TokValueLiteral("content-type")
class ShortcutLocation(_HeaderMixin, base.Value):
    """Shortcut for a location header: ``l"http://..."``."""
    preamble = "l"
    key = base.TokValueLiteral("location")
class ShortcutUserAgent(_HeaderMixin, base.OptionsOrValue):
    """Shortcut for a user-agent header: ``u<shortcut>`` or ``u"string"``."""
    preamble = "u"
    # Known shortcuts come from the user_agents table.
    options = [i[1] for i in user_agents.UASTRINGS]
    key = base.TokValueLiteral("user-agent")

    def values(self, settings):
        value = self.value.val
        if self.option_used:
            # A recognised shortcut expands to the corresponding full
            # user-agent string from the user_agents table.
            value = user_agents.get_by_shortcut(value.lower().decode())[2].encode()
        return (
            self.key.get_generator(settings),
            value
        )
class Raw(base.CaselessLiteral):
    """``r`` token: suppress automatically-added headers."""
    TOK = "r"
class Body(base.Value):
    """Message body token: ``b'content body payload'``."""
    preamble = "b"
class Times(base.Integer):
    """Repetition count token: ``x<n>``."""
    preamble = "x"
class Response(_HTTP2Message):
    """HTTP/2 response specification: ``<code>:<header>:<body>``."""
    unique_name = None
    # Token types that may follow the status code, in any order.
    comps = (
        Header,
        Body,
        ShortcutContentType,
        ShortcutLocation,
        Raw,
    )

    def __init__(self, tokens):
        super(Response, self).__init__(tokens)
        # Cache for the assembled wire bytes; filled on first values() call.
        self.rendered_values = None
        # NOTE(review): presumably 2 because server-initiated HTTP/2 streams
        # use even stream ids — confirm against the protocol layer.
        self.stream_id = 2

    @property
    def status_code(self):
        return self.tok(StatusCode)

    @classmethod
    def expr(cls):
        """Build the pyparsing expression for a response spec."""
        parts = [i.expr() for i in cls.comps]
        atom = pp.MatchFirst(parts)
        resp = pp.And(
            [
                StatusCode.expr(),
                pp.ZeroOrMore(base.Sep + atom)
            ]
        )
        resp = resp.setParseAction(cls)
        return resp

    def values(self, settings):
        """Assemble (and cache) the serialized response via the protocol."""
        if self.rendered_values:
            return self.rendered_values
        else:
            headers = Headers([header.values(settings) for header in self.headers])

            body = self.body
            if body:
                body = body.string()

            resp = http.Response(
                b'HTTP/2.0',
                self.status_code.string(),
                b'',
                headers,
                body,
            )
            resp.stream_id = self.stream_id

            self.rendered_values = settings.protocol.assemble(resp)
            return self.rendered_values

    def spec(self):
        """Reconstruct the textual spec from the parsed tokens."""
        return ":".join([i.spec() for i in self.tokens])
class NestedResponse(base.NestedMessage):
    """A response spec nested inside a request: ``s"<response spec>"``."""
    preamble = "s"
    nest_type = Response
class Request(_HTTP2Message):
    """HTTP/2 request specification: ``<method>:<path>:<header>:<body>``."""
    # Token types that may follow the method and path, in any order.
    comps = (
        Header,
        ShortcutContentType,
        ShortcutUserAgent,
        Raw,
        NestedResponse,
        Body,
        Times,
    )
    logattrs = ["method", "path"]

    def __init__(self, tokens):
        super(Request, self).__init__(tokens)
        # Cache for the assembled wire bytes; filled on first values() call.
        self.rendered_values = None
        # NOTE(review): presumably 1 because client-initiated HTTP/2 streams
        # use odd stream ids — confirm against the protocol layer.
        self.stream_id = 1

    @property
    def method(self):
        return self.tok(Method)

    @property
    def path(self):
        return self.tok(Path)

    @property
    def nested_response(self):
        return self.tok(NestedResponse)

    @property
    def times(self):
        return self.tok(Times)

    @classmethod
    def expr(cls):
        """Build the pyparsing expression for a request spec."""
        parts = [i.expr() for i in cls.comps]
        atom = pp.MatchFirst(parts)
        resp = pp.And(
            [
                Method.expr(),
                base.Sep,
                Path.expr(),
                pp.ZeroOrMore(base.Sep + atom)
            ]
        )
        resp = resp.setParseAction(cls)
        return resp

    def values(self, settings):
        """Assemble (and cache) the serialized request via the protocol."""
        if self.rendered_values:
            return self.rendered_values
        else:
            path = self.path.string()
            if self.nested_response:
                # A nested response spec is appended to the path so the
                # server side can parse and replay it.
                path += self.nested_response.parsed.spec().encode()

            headers = Headers([header.values(settings) for header in self.headers])

            body = self.body
            if body:
                body = body.string()

            req = http.Request(
                b'',
                self.method.string(),
                b'http',
                b'',
                b'',
                path,
                (2, 0),
                headers,
                body,
            )
            req.stream_id = self.stream_id

            self.rendered_values = settings.protocol.assemble(req)
            return self.rendered_values

    def spec(self):
        """Reconstruct the textual spec from the parsed tokens."""
        return ":".join([i.spec() for i in self.tokens])
def make_error_response(reason, body=None):
    """Build a canned status-800 Response carrying a pathod error message."""
    error_text = "pathod error: " + (body or reason)
    tokens = [
        StatusCode("800"),
        Body(base.TokValueLiteral(error_text)),
    ]
    return Response(tokens)
| |
import re
import os
import nltk
import zlib
import codecs
import shutil
import logging
from unidecode import unidecode
from indra.literature.pmc_client import extract_text
from indra.resources.greek_alphabet import greek_alphabet
logger = logging.getLogger(__name__)
class IsiPreprocessor(object):
    """Preprocess a set of documents, one by one, and add the preprocessed
    text to a temporary directory in a format suitable for the ISI reader.
    The ISI reader requires plain text with one sentence per line.

    Attributes
    ----------
    preprocessed_dir : str
        The directory holding the literature text preprocessed and sentence
        tokenized in a format suitable for the ISI reader
    next_file_id : int
        The next file with preprocessed text will be named next_file_id.txt
    pmids : dict
        A dictionary mapping file ids to the pmid of the text corresponding
        to that file, can be None if unknown
    extra_annotations : dict
        A dictionary mapping file ids to a (possibly empty) dictionary with
        additional annotations to include for statements extracted from this
        document
    """

    def __init__(self, preprocessed_dir):
        preprocessed_dir = os.path.abspath(preprocessed_dir)
        self.preprocessed_dir = preprocessed_dir
        self.next_file_id = 1
        self.pmids = {}
        self.extra_annotations = {}

        # This directory should be empty; warn (but continue) if it is not.
        contents = os.listdir(preprocessed_dir)
        if len(contents) != 0:
            logger.warning('IsiPreprocessor should get an empty directory in'
                           ' which to store preprocessed files.')

    def register_preprocessed_file(self, infile, pmid, extra_annotations):
        """Set up already preprocessed text file for reading with ISI reader.

        This is essentially a mock function to "register" already preprocessed
        files and get an IsiPreprocessor object that can be passed to
        the IsiProcessor.

        Parameters
        ----------
        infile : str
            Path to an already preprocessed text file (i.e. one ready to
            be sent for reading to ISI reader).
        pmid : str
            The PMID corresponding to the file
        extra_annotations : dict
            Extra annotations to be added to each statement, possibly including
            metadata about the source (annotations with the key "interaction"
            will be overridden)
        """
        infile_base = os.path.basename(infile)
        outfile = os.path.join(self.preprocessed_dir, infile_base)
        shutil.copyfile(infile, outfile)

        # Metadata is keyed by the filename without its extension, matching
        # the '%s.json' basenames consumed by iter_outputs.
        infile_key = os.path.splitext(infile_base)[0]

        self.pmids[infile_key] = pmid
        self.extra_annotations[infile_key] = extra_annotations

    def preprocess_plain_text_string(self, text, pmid, extra_annotations):
        """Preprocess plain text string for use by ISI reader.

        Preprocessing is done by tokenizing into sentences and writing
        each sentence on its own line in a plain text file. All other
        preprocessing functions ultimately call this one.

        Parameters
        ----------
        text : str
            The plain text of the article or abstract
        pmid : str
            The PMID from which it comes, or None if not specified
        extra_annotations : dict
            Extra annotations to be added to each statement, possibly including
            metadata about the source (annotations with the key "interaction"
            will be overridden)
        """
        output_file = '%s.txt' % self.next_file_id
        output_file = os.path.join(self.preprocessed_dir, output_file)

        # Replace greek characters with corresponding strings
        for greek_letter, spelled_letter in greek_alphabet.items():
            text = text.replace(greek_letter, spelled_letter)

        # Replace all other unicode characters with nearest ascii equivalents
        text = unidecode(text)

        # Tokenize sentence
        sentences = nltk.sent_tokenize(text)

        # Write sentences to text file, one per line with no trailing newline.
        first_sentence = True
        with codecs.open(output_file, 'w', encoding='utf-8') as f:
            for sentence in sentences:
                if not first_sentence:
                    f.write('\n')
                f.write(sentence.rstrip())
                first_sentence = False

        # Store annotations
        self.pmids[str(self.next_file_id)] = pmid
        self.extra_annotations[str(self.next_file_id)] = extra_annotations

        # Increment file id
        self.next_file_id += 1

    def preprocess_plain_text_file(self, filename, pmid, extra_annotations):
        """Preprocess a plain text file for use with ISI reader.

        Preprocessing results in a new text file with one sentence
        per line.

        Parameters
        ----------
        filename : str
            The name of the plain text file
        pmid : str
            The PMID from which it comes, or None if not specified
        extra_annotations : dict
            Extra annotations to be added to each statement, possibly including
            metadata about the source (annotations with the key "interaction"
            will be overridden)
        """
        with codecs.open(filename, 'r', encoding='utf-8') as f:
            content = f.read()
            self.preprocess_plain_text_string(content, pmid,
                                              extra_annotations)

    def preprocess_nxml_file(self, filename, pmid, extra_annotations):
        """Preprocess an NXML file for use with the ISI reader.

        Preprocessing is done by extracting plain text from NXML and then
        creating a text file with one sentence per line.

        Parameters
        ----------
        filename : str
            Filename (more specifically the file path) of an nxml file to
            process
        pmid : str
            The PMID from which it comes, or None if not specified
        extra_annotations : dict
            Extra annotations to be added to each statement, possibly including
            metadata about the source (annotations with the key "interaction"
            will be overridden)
        """
        with open(filename, 'r') as fh:
            txt_content = extract_text(fh.read())

        # We need to remove some common LaTEX commands from the converted text
        # or the reader will get confused
        cmd1 = r'[^ \{\}]+\{[^\{\}]+\}\{[^\{\}]+\}'
        cmd2 = r'[^ \{\}]+\{[^\{\}]+\}'
        txt_content = re.sub(cmd1, '', txt_content)
        txt_content = re.sub(cmd2, '', txt_content)

        # Preprocess text extracted from nxml
        self.preprocess_plain_text_string(txt_content, pmid, extra_annotations)

    def preprocess_abstract_list(self, abstract_list):
        """Preprocess abstracts in database pickle dump format for ISI reader.

        For each abstract, creates a plain text file with one sentence per
        line, and stores metadata to be included with each statement from
        that abstract.

        Parameters
        ----------
        abstract_list : list[dict]
            Compressed abstracts with corresponding metadata in INDRA database
            pickle dump format.
        """
        for abstract_struct in abstract_list:
            abs_format = abstract_struct['format']
            content_type = abstract_struct['text_type']
            content_zipped = abstract_struct['content']
            tcid = abstract_struct['tcid']
            trid = abstract_struct['trid']

            assert(abs_format == 'text')
            assert(content_type == 'abstract')

            pmid = None  # Don't worry about pmid for now
            extra_annotations = {'tcid': tcid, 'trid': trid}

            # Uncompress content (zlib.MAX_WBITS+16 selects gzip framing)
            content = zlib.decompress(content_zipped,
                                      zlib.MAX_WBITS+16).decode('utf-8')

            self.preprocess_plain_text_string(content, pmid, extra_annotations)

    def iter_outputs(self, output_dir):
        """Iterate over the outputs in a given directory using stored metadata.

        For each of the output JSONs, retrieve the extra annotations for that
        file, and link the file with its corresponding PMID.

        Parameters
        ----------
        output_dir : str
            The path to the directory where the JSON outputs were dumped.
        """
        for basename, pmid in self.pmids.items():
            fname = os.path.join(output_dir, '%s.json' % basename)
            # Annotations are keyed by the file id/basename, not the full
            # output path. Looking up by `fname` (as the original code did)
            # always missed and silently returned {}.
            extra_annotations = self.extra_annotations.get(basename, {})
            yield fname, pmid, extra_annotations
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorArray: a dynamically sized array of Tensors."""
# Mixture of pep8 and non-pep8 names, so disable pylint bad-name
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import weakref
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import tf_should_use
from tensorflow.python.util.tf_export import tf_export
# _GraphTensorArray accesses many of the hidden generated ops, but is in
# fact built to wrap these methods.
# pylint: disable=protected-access
class _GraphTensorArray(object):
  """Graph-mode implementation of TensorArray.

  Wraps the hidden gen_data_flow_ops TensorArrayV3 kernels; a resource
  `handle` identifies the array and a float `flow` tensor serializes
  reads/writes for control-flow purposes.
  """

  def __init__(self,
               dtype,
               size=None,
               dynamic_size=None,
               clear_after_read=None,
               tensor_array_name=None,
               handle=None,
               flow=None,
               infer_shape=True,
               element_shape=None,
               colocate_with_first_write_call=True,
               name=None):
    """Constructs a graph mode TensorArray.

    Args:
      dtype: (required) data type of the TensorArray.
      size: (optional) int32 scalar `Tensor`: the size of the TensorArray.
        Required if handle is not provided.
      dynamic_size: (optional) Python bool: If true, writes to the TensorArray
        can grow the TensorArray past its initial size. Default: False.
      clear_after_read: Boolean (optional, default: True). If True, clear
        TensorArray values after reading them. This disables read-many
        semantics, but allows early release of memory.
      tensor_array_name: (optional) Python string: the name of the TensorArray.
        This is used when creating the TensorArray handle. If this value is
        set, handle should be None.
      handle: (optional) A `Tensor` handle to an existing TensorArray. If this
        is set, tensor_array_name should be None. Only supported in graph mode.
      flow: (optional) A float `Tensor` scalar coming from an existing
        `TensorArray.flow`. Only supported in graph mode.
      infer_shape: (optional, default: True) If True, shape inference
        is enabled. In this case, all elements must have the same shape.
      element_shape: (optional, default: None) A `TensorShape` object specifying
        the shape constraints of each of the elements of the TensorArray.
        Need not be fully defined.
      colocate_with_first_write_call: If `True`, the TensorArray will be
        colocated on the same device as the Tensor used on its first write
        (write operations include `write`, `unstack`, and `split`). If `False`,
        the TensorArray will be placed on the device determined by the
        device context available during its initialization.
      name: A name for the operation (optional).

    Raises:
      ValueError: if both handle and tensor_array_name are provided.
      TypeError: if handle is provided but is not a Tensor.
    """
    # Argument validation: a pre-existing handle already fixes the array's
    # size, shape and read semantics, so none of those may be respecified.
    if handle is not None and tensor_array_name:
      raise ValueError(
          "Cannot construct with both handle and tensor_array_name")
    if handle is not None and not isinstance(handle, ops.Tensor):
      raise TypeError("Handle must be a Tensor")
    if handle is None and size is None:
      raise ValueError("Size must be provided if handle is not provided")
    if handle is not None and size is not None:
      raise ValueError("Cannot provide both a handle and size "
                       "at the same time")
    if handle is not None and element_shape is not None:
      raise ValueError("Cannot provide both a handle and element_shape "
                       "at the same time")
    if handle is not None and dynamic_size is not None:
      raise ValueError("Cannot provide both a handle and dynamic_size "
                       "at the same time")
    if handle is not None and clear_after_read is not None:
      raise ValueError("Cannot provide both a handle and clear_after_read "
                       "at the same time")

    if clear_after_read is None:
      clear_after_read = True
    # NOTE(review): _dynamic_size is initialized to None while the coerced
    # value is kept only in the local `dynamic_size` — confirm whether the
    # attribute is intended to be updated here.
    self._dynamic_size = None
    dynamic_size = dynamic_size or False

    self._dtype = dtype

    # Used to keep track of what tensors the TensorArray should be
    # colocated with.  We choose to colocate the TensorArray with the
    # first tensor written to it.
    self._colocate_with_first_write_call = colocate_with_first_write_call
    if colocate_with_first_write_call:
      self._colocate_with = []
    else:
      self._colocate_with = None

    # Record the current static shape for the array elements. The element
    # shape is defined either by `element_shape` or the shape of the tensor
    # of the first write. If `infer_shape` is true, all writes checks for
    # shape equality.
    # (Stored as a 0- or 1-element list so it can be mutated in place and
    # shared between TensorArray instances derived from one another.)
    if element_shape is None:
      self._infer_shape = infer_shape
      self._element_shape = []
    else:
      self._infer_shape = True
      self._element_shape = [tensor_shape.TensorShape(element_shape)]
    with ops.name_scope(name, "TensorArray", [handle, size, flow]) as scope:
      if handle is not None:
        self._handle = handle
        if flow is None:
          raise ValueError("flow must not be None if handle is not None.")
        self._flow = flow
      else:
        # Construct the TensorArray with an empty device.  The first
        # write into the TensorArray from a Tensor with a set device
        # will retroactively set the device value of this op.
        def create():
          """Create the TensorArray op."""
          return gen_data_flow_ops.tensor_array_v3(
              dtype=dtype,
              size=size,
              element_shape=element_shape,
              identical_element_shapes=infer_shape,
              dynamic_size=dynamic_size,
              clear_after_read=clear_after_read,
              tensor_array_name=tensor_array_name,
              name=scope)
        if colocate_with_first_write_call:
          with ops.device(None), ops.colocate_with(None, ignore_existing=True):
            self._handle, self._flow = create()
        else:
          self._handle, self._flow = create()

  @property
  def flow(self):
    # Float scalar serializing reads/writes for control flow.
    return self._flow

  @property
  def dtype(self):
    return self._dtype

  @property
  def handle(self):
    return self._handle

  def _merge_element_shape(self, shape):
    """Changes the element shape of the array given a shape to merge with.

    Args:
      shape: A `TensorShape` object to merge with.

    Raises:
      ValueError: if the provided shape is incompatible with the current
          element shape of the `TensorArray`.
    """
    if self._element_shape:
      if not shape.is_compatible_with(self._element_shape[0]):
        raise ValueError(
            "Inconsistent shapes: saw %s but expected %s "
            "(and infer_shape=True)" % (shape, self._element_shape[0]))
      # In-place update so derived TensorArrays observe the refinement.
      self._element_shape[0] = self._element_shape[0].merge_with(shape)
    else:
      self._element_shape.append(shape)

  @contextlib.contextmanager
  def _maybe_colocate_with(self, value):
    """Colocate operations with an internal colocation group or `value`.

    Args:
      value: `Tensor`, the tensor to try to colocate with.

    Yields:
      Does not yield anything, but the new context is a colocation context.

    If no internal colocation group is set, colocate with `value` and set
    the internal colocation group to be value.
    """
    if not self._colocate_with_first_write_call:
      yield
    else:
      if not self._colocate_with:
        self._colocate_with.append(value)
      with ops.colocate_with(self._colocate_with[0]):
        yield

  def identity(self):
    """See TensorArray."""
    flow = array_ops.identity(self._flow)
    # The new TensorArray shares handle and (mutable) shape state.
    ta = TensorArray(
        dtype=self._dtype,
        handle=self._handle,
        flow=flow,
        infer_shape=self._infer_shape,
        colocate_with_first_write_call=self._colocate_with_first_write_call)
    ta._element_shape = self._element_shape
    ta._colocate_with = self._colocate_with
    return ta

  def grad(self, source, flow=None, name=None):
    """See TensorArray."""
    # tensor_array_grad requires a flow input when forward
    # TensorArrays are dynamically sized.  This forces the creation
    # of the grad TensorArray only once the final forward array's size
    # is fixed.
    if flow is None:
      flow = self.flow
    with ops.name_scope(name, "TensorArrayGrad", [self._handle]):
      with ops.colocate_with(self._handle):
        g_handle, unused_flow = gen_data_flow_ops.tensor_array_grad_v3(
            handle=self._handle, source=source, flow_in=flow, name=name)
        with ops.control_dependencies([g_handle]):
          flow = array_ops.identity(flow, name="gradient_flow")
        g = TensorArray(
            dtype=self._dtype,
            handle=g_handle,
            flow=flow,
            infer_shape=self._infer_shape,
            colocate_with_first_write_call=False)
        g._element_shape = self._element_shape
        return g

  def read(self, index, name=None):
    """See TensorArray."""
    value = gen_data_flow_ops.tensor_array_read_v3(
        handle=self._handle,
        index=index,
        flow_in=self._flow,
        dtype=self._dtype,
        name=name)
    if self._element_shape:
      # Propagate the known static element shape onto the read value.
      value.set_shape(self._element_shape[0].dims)
    return value

  @tf_should_use.should_use_result
  def write(self, index, value, name=None):
    """See TensorArray."""
    with ops.name_scope(name, "TensorArrayWrite", [self._handle, index, value]):
      value = ops.convert_to_tensor(value, name="value")
      if self._infer_shape:
        self._merge_element_shape(value.shape)
      with self._maybe_colocate_with(value):
        flow_out = gen_data_flow_ops.tensor_array_write_v3(
            handle=self._handle,
            index=index,
            value=value,
            flow_in=self._flow,
            name=name)
      # Return a new TensorArray carrying the updated flow.
      ta = TensorArray(
          dtype=self._dtype,
          handle=self._handle,
          flow=flow_out,
          colocate_with_first_write_call=self._colocate_with_first_write_call)
      ta._infer_shape = self._infer_shape
      ta._element_shape = self._element_shape
      ta._colocate_with = self._colocate_with
      return ta

  def stack(self, name=None):
    """See TensorArray."""
    # stack is gather over all indices [0, size).
    with ops.colocate_with(self._handle):
      with ops.name_scope(name, "TensorArrayStack", [self._handle]):
        return self.gather(math_ops.range(0, self.size()), name=name)

  def gather(self, indices, name=None):
    """See TensorArray."""
    if self._element_shape:
      element_shape = self._element_shape[0]
    else:
      element_shape = tensor_shape.TensorShape(None)
    value = gen_data_flow_ops.tensor_array_gather_v3(
        handle=self._handle,
        indices=indices,
        flow_in=self._flow,
        dtype=self._dtype,
        name=name,
        element_shape=element_shape)
    if self._element_shape and self._element_shape[0].dims is not None:
      # Leading None is the (unknown) number of gathered elements.
      value.set_shape([None] + self._element_shape[0].dims)
    return value

  def concat(self, name=None):
    """See TensorArray."""
    if self._element_shape and self._element_shape[0].dims is not None:
      element_shape_except0 = (
          tensor_shape.TensorShape(self._element_shape[0].dims[1:]))
    else:
      element_shape_except0 = tensor_shape.TensorShape(None)
    value, _ = gen_data_flow_ops.tensor_array_concat_v3(
        handle=self._handle,
        flow_in=self._flow,
        dtype=self._dtype,
        name=name,
        element_shape_except0=element_shape_except0)
    if self._element_shape and self._element_shape[0].dims is not None:
      # Dimension 0 of the result is the (unknown) sum of element dims[0].
      value.set_shape([None] + self._element_shape[0].dims[1:])
    return value

  @tf_should_use.should_use_result
  def unstack(self, value, name=None):
    """See TensorArray."""
    # unstack is scatter along axis 0 of `value`.
    with ops.name_scope(name, "TensorArrayUnstack", [self._handle, value]):
      num_elements = array_ops.shape(value)[0]
      return self.scatter(
          indices=math_ops.range(0, num_elements), value=value, name=name)

  @tf_should_use.should_use_result
  def scatter(self, indices, value, name=None):
    """See TensorArray."""
    with ops.name_scope(name, "TensorArrayScatter",
                        [self._handle, value, indices]):
      value = ops.convert_to_tensor(value, name="value")
      if self._infer_shape and not context.executing_eagerly():
        # Element shape is value's shape with the scatter axis dropped.
        self._merge_element_shape(value.shape[1:])
      with self._maybe_colocate_with(value):
        flow_out = gen_data_flow_ops.tensor_array_scatter_v3(
            handle=self._handle,
            indices=indices,
            value=value,
            flow_in=self._flow,
            name=name)
      ta = TensorArray(
          dtype=self._dtype,
          handle=self._handle,
          flow=flow_out,
          colocate_with_first_write_call=self._colocate_with_first_write_call)
      ta._infer_shape = self._infer_shape
      ta._element_shape = self._element_shape
      ta._colocate_with = self._colocate_with
      return ta

  @tf_should_use.should_use_result
  def split(self, value, lengths, name=None):
    """See TensorArray."""
    with ops.name_scope(name, "TensorArraySplit",
                        [self._handle, value, lengths]):
      value = ops.convert_to_tensor(value, name="value")
      with self._maybe_colocate_with(value):
        lengths_64 = math_ops.to_int64(lengths)
        if self._infer_shape and not context.executing_eagerly():
          clengths = tensor_util.constant_value(lengths_64)
          if value.shape.dims is not None:
            # Only when all split lengths are statically equal can a
            # common element shape be inferred.
            if clengths is not None and clengths.max() == clengths.min():
              self._merge_element_shape(
                  tensor_shape.TensorShape([clengths[0]]).concatenate(
                      value.shape[1:]))
        flow_out = gen_data_flow_ops.tensor_array_split_v3(
            handle=self._handle,
            value=value,
            lengths=lengths_64,
            flow_in=self._flow,
            name=name)
      ta = TensorArray(
          dtype=self._dtype,
          handle=self._handle,
          flow=flow_out,
          colocate_with_first_write_call=self._colocate_with_first_write_call)
      ta._infer_shape = self._infer_shape
      ta._element_shape = self._element_shape
      ta._colocate_with = self._colocate_with
      return ta

  def size(self, name=None):
    """See TensorArray."""
    return gen_data_flow_ops.tensor_array_size_v3(
        handle=self._handle, flow_in=self.flow, name=name)

  @tf_should_use.should_use_result
  def close(self, name=None):
    """See TensorArray."""
    return gen_data_flow_ops.tensor_array_close_v3(
        handle=self._handle, name=name)
class _GraphTensorArrayV2(object):
  """Graph-mode implementation of TensorArray backed by TensorLists.

  The backing tensor of this TensorArray is a TensorList variant tensor which is
  stored in the `flow`. The `handle` is always none here. The reason we use the
  `flow` field and not the `handle` field is to ensure backwards compatibility
  with legacy control flow.
  """

  def __init__(self,
               dtype,
               size=None,
               dynamic_size=None,
               clear_after_read=None,
               tensor_array_name=None,
               handle=None,
               flow=None,
               infer_shape=True,
               element_shape=None,
               colocate_with_first_write_call=True,
               name=None):
    """Constructs a graph mode TensorArray.

    Args:
      dtype: (required) data type of the TensorArray.
      size: (optional) int32 scalar `Tensor`: the size of the TensorArray.
        Required if flow is not provided.
      dynamic_size: (optional) Python bool: If true, writes to the TensorArray
        can grow the TensorArray past its initial size. Default: False.
      clear_after_read: (optional) unused. Not supported in TensorLists.
      tensor_array_name: (optional) unused.
      handle: (optional) Must always be None.
      flow: (optional) A variant `Tensor` scalar for a TensorList.
      infer_shape: (optional, default: True) If True, shape inference is
        enabled. In this case, all elements must have the same shape.
      element_shape: (optional, default: None) A `TensorShape` object specifying
        the shape constraints of each of the elements of the TensorArray. Need
        not be fully defined.
      colocate_with_first_write_call: (optional). unused.
      name: (optional) A name for the operation.

    Raises:
      ValueError: if both handle and tensor_array_name are provided.
      TypeError: if handle is provided but is not a Tensor.
    """
    assert handle is None
    del handle
    del clear_after_read
    del tensor_array_name
    del colocate_with_first_write_call

    self._dynamic_size = dynamic_size

    # Exactly one of `size` / `flow` must be supplied; `element_shape` may
    # only accompany `size` (a pre-existing list already has a shape).
    if (flow is not None and
        (not isinstance(flow, ops.Tensor) or flow.dtype != dtypes.variant)):
      raise TypeError("flow must be a variant tensor")
    if flow is None and size is None:
      raise ValueError("Size must be provided if flow is not provided")
    if flow is not None and size is not None:
      raise ValueError("Cannot provide both a flow and size "
                       "at the same time")
    if flow is not None and element_shape is not None:
      raise ValueError("Cannot provide both a flow and element_shape "
                       "at the same time")

    self._dtype = dtype

    # Record the current static shape for the array elements. The element
    # shape is defined either by `element_shape` or the shape of the tensor
    # of the first write. If `infer_shape` is true, all writes checks for
    # shape equality.
    if element_shape is None:
      self._infer_shape = infer_shape
      self._element_shape = []
    else:
      self._infer_shape = True
      self._element_shape = [tensor_shape.TensorShape(element_shape)]
    with ops.name_scope(name, "TensorArrayV2", [size, flow]) as scope:
      if flow is None:
        # Fresh array: reserve a TensorList of `size` uninitialized elements.
        self._flow = list_ops.tensor_list_reserve(
            element_shape=element_shape,
            num_elements=size,
            element_dtype=dtype,
            name=scope)
      else:
        self._flow = flow

    # For backwards compatibility.
    self._colocate_with_first_write_call = None
    self._colocate_with = None

  @property
  def flow(self):
    # The variant tensor holding the backing TensorList.
    return self._flow

  @property
  def dtype(self):
    return self._dtype

  @property
  def handle(self):
    # We intentionally do not raise an error so that legacy while_loop does not
    # complain.
    return None

  def _merge_element_shape(self, shape):
    """Changes the element shape of the array given a shape to merge with.

    Args:
      shape: A `TensorShape` object to merge with.

    Raises:
      ValueError: if the provided shape is incompatible with the current
          element shape of the `TensorArray`.
    """
    if self._element_shape:
      if not shape.is_compatible_with(self._element_shape[0]):
        raise ValueError(
            "Inconsistent shapes: saw %s but expected %s "
            "(and infer_shape=True)" % (shape, self._element_shape[0]))
      self._element_shape[0] = self._element_shape[0].merge_with(shape)
    else:
      self._element_shape.append(shape)

  def identity(self):
    """See TensorArray."""
    flow = array_ops.identity(self._flow)
    return build_ta_with_new_flow(self, flow)

  def grad(self, source, flow=None, name=None):
    """Not supported."""
    raise NotImplementedError()

  def read(self, index, name=None):
    """See TensorArray."""
    with ops.name_scope(name, "TensorArrayV2Read", [self._flow, index]):
      if self._element_shape:
        element_shape = self._element_shape[0]
      else:
        element_shape = tensor_shape.TensorShape(None)
      value = list_ops.tensor_list_get_item(
          input_handle=self._flow,
          index=index,
          element_dtype=self._dtype,
          element_shape=element_shape,
          name=name)
      if self._element_shape:
        value.set_shape(self._element_shape[0].dims)
      return value

  @tf_should_use.should_use_result
  def write(self, index, value, name=None):
    """See TensorArray."""
    with ops.name_scope(name, "TensorArrayV2Write", [self._flow, index, value]):
      value = ops.convert_to_tensor(value, name="value")
      if self._infer_shape:
        self._merge_element_shape(value.shape)
      flow_out = list_ops.tensor_list_set_item(
          input_handle=self._flow,
          index=index,
          item=value,
          resize_if_index_out_of_bounds=self._dynamic_size,
          name=name)
      return build_ta_with_new_flow(self, flow_out)

  def stack(self, name=None):
    """See TensorArray."""
    with ops.name_scope(name, "TensorArrayV2Stack", [self._flow]):
      if self._element_shape:
        element_shape = self._element_shape[0]
      else:
        element_shape = tensor_shape.TensorShape(None)
      value = list_ops.tensor_list_stack(
          input_handle=self._flow,
          element_dtype=self._dtype,
          element_shape=element_shape)
      if self._element_shape and self._element_shape[0].dims is not None:
        # Leading dim is the (statically unknown) number of elements.
        value.set_shape([None] + self._element_shape[0].dims)
      return value

  def gather(self, indices, name=None):
    """See TensorArray."""
    if self._element_shape:
      element_shape = self._element_shape[0]
    else:
      element_shape = tensor_shape.TensorShape(None)
    value = list_ops.tensor_list_gather(
        input_handle=self._flow,
        indices=indices,
        element_dtype=self._dtype,
        element_shape=element_shape,
        name=name)
    if self._element_shape and self._element_shape[0].dims is not None:
      value.set_shape([None] + self._element_shape[0].dims)
    return value

  def concat(self, name=None):
    """See TensorArray."""
    if self._element_shape and self._element_shape[0].dims is not None:
      element_shape = [None] + self._element_shape[0].dims[1:]
    else:
      element_shape = None
    value = list_ops.tensor_list_concat(
        input_handle=self._flow,
        element_dtype=self._dtype,
        element_shape=element_shape,
        name=name)
    return value

  @tf_should_use.should_use_result
  def unstack(self, value, name=None):
    """See TensorArray."""
    with ops.name_scope(name, "TensorArrayUnstack", [self._flow, value]):
      value = ops.convert_to_tensor(value, name="value")
      if self._infer_shape and not context.executing_eagerly():
        self._merge_element_shape(value.shape[1:])
      flow_out = list_ops.tensor_list_from_tensor(
          tensor=value, element_shape=value.shape[1:])
      return build_ta_with_new_flow(self, flow_out)

  @tf_should_use.should_use_result
  def scatter(self, indices, value, name=None):
    """See TensorArray."""
    with ops.name_scope(name, "TensorArrayScatter",
                        [self._flow, value, indices]):
      value = ops.convert_to_tensor(value, name="value")
      if self._infer_shape and not context.executing_eagerly():
        self._merge_element_shape(value.shape[1:])
      # Removed a dead local: the original code computed an `element_shape`
      # here but never passed it to tensor_list_scatter.
      flow_out = list_ops.tensor_list_scatter(
          tensor=value, indices=indices, input_handle=self._flow)
      return build_ta_with_new_flow(self, flow_out)

  @tf_should_use.should_use_result
  def split(self, value, lengths, name=None):
    """See TensorArray."""
    with ops.name_scope(name, "TensorArraySplit", [self._flow, value, lengths]):
      value = ops.convert_to_tensor(value, name="value")
      lengths_64 = math_ops.to_int64(lengths)
      if self._infer_shape and not context.executing_eagerly():
        # A static element shape can only be inferred when every piece has
        # the same, statically known length.
        clengths = tensor_util.constant_value(lengths_64)
        if value.shape.dims is not None:
          if clengths is not None and clengths.max() == clengths.min():
            self._merge_element_shape(
                tensor_shape.TensorShape([clengths[0]]).concatenate(
                    value.shape[1:]))
      flow_out = list_ops.tensor_list_split(
          tensor=value,
          lengths=lengths_64,
          element_shape=self._element_shape[0] if self._element_shape else None,
          name=name)
      return build_ta_with_new_flow(self, flow_out)

  def size(self, name=None):
    """See TensorArray."""
    return list_ops.tensor_list_length(input_handle=self._flow, name=name)

  @tf_should_use.should_use_result
  def close(self, name=None):
    """See TensorArray."""
    # TensorLists are garbage-collected with the flow tensor; nothing to free.
    return gen_control_flow_ops.no_op(name=name)
# pylint: enable=protected-access
class _EagerTensorArray(object):
  """Eager-compatible implementation of TensorArray.

  Elements are held directly in a Python list of tensors; there is no
  underlying graph resource.
  """

  def __init__(self,
               dtype,
               size=None,
               dynamic_size=None,
               clear_after_read=None,
               tensor_array_name=None,
               handle=None,
               flow=None,
               infer_shape=True,
               element_shape=None,
               colocate_with_first_write_call=True,
               name=None):
    """Constructs a TensorArray compatible with eager execution.

    Args:
      dtype: (required) data type of the TensorArray.
      size: (optional) int32 scalar `Tensor`: the size of the TensorArray.
        Required if handle is not provided.
      dynamic_size: (optional) Python bool: If true, writes to the TensorArray
        can grow the TensorArray past its initial size. Default: False.
      clear_after_read: Boolean (optional, default: True). If True, clear
        TensorArray values after reading them. This disables read-many
        semantics, but allows early release of memory.
      tensor_array_name: unused.
      handle: unsupported.
      flow: unsupported.
      infer_shape: used for error checking, same semantics as TensorArray.
      element_shape: used for error checking, same semantics as TensorArray.
      colocate_with_first_write_call: unsupported.
      name: unsupported.

    Raises:
      ValueError: handle or flow are supplied, or if size is not supplied.
    """
    del (flow, tensor_array_name, name)  # Unused.

    if handle is not None:
      raise ValueError("TensorArray handles are not supported when eager "
                       "execution is enabled.")
    if size is None:
      raise ValueError("Size must be declared for TensorArrays when eager "
                       "execution is enabled.")

    # These attributes are not meaningful when eager is enabled, but some
    # library functions (e.g., those in control_flow_ops.py) access them to
    # create new tensor arrays; as such, we define them for the sake of
    # compatibility.
    self._handle = None
    # we assign a dummy value to _flow in case other code assumes it to be
    # a Tensor
    self._flow = constant_op.constant(0, dtype=dtypes.int32)
    self._infer_shape = infer_shape
    self._element_shape = element_shape
    self._colocate_with_first_write_call = colocate_with_first_write_call

    self._dtype = dtype
    self._dynamic_size = dynamic_size or False
    self._clear_after_read = (
        True if clear_after_read is None else clear_after_read)
    # Indices whose values were cleared after a read; reading one of these
    # again is an error (see read()).
    self._previously_read_indices = []

    if isinstance(size, ops.EagerTensor):
      size = size.numpy()
    # The backing store: one slot per index, None meaning "never written".
    self._tensor_array = [None for _ in range(size)]

  @property
  def flow(self):
    """For compatibility; flows are not meaningful when eager is enabled."""
    return self._flow

  @property
  def dtype(self):
    return self._dtype

  @property
  def handle(self):
    """For compatibility; handles are not meaningful when eager is enabled."""
    return self._handle

  def identity(self):
    """See TensorArray."""
    # `parent` is a weakref to the wrapping TensorArray, installed by
    # TensorArray.__init__; identity is a no-op in eager mode.
    return self.parent()

  def grad(self, source, flow=None, name=None):
    raise NotImplementedError(
        "TensorArray.grad is not supported when executing eagerly; eager's "
        "gradient implementation does not use/need this function to compute "
        "gradients of operations that use TensorArrays.")

  def read(self, index, name=None):
    """See TensorArray."""
    del name  # not meaningful when executing eagerly.

    if isinstance(index, ops.EagerTensor):
      index = index.numpy()

    if index < 0:
      raise errors_impl.OutOfRangeError(
          None, None,
          "Reading from negative indices (index %d) is not allowed." % index)

    if index >= len(self._tensor_array):
      raise errors_impl.OutOfRangeError(
          None, None, "Tried to read from index %d but array size is: %d" %
          (index, len(self._tensor_array)))

    tensor = self._tensor_array[index]
    if tensor is None:
      # Either the slot was cleared by a previous read (error) or it was
      # never written (return zeros, matching graph-mode behavior).
      if index in self._previously_read_indices:
        raise errors_impl.InvalidArgumentError(
            None, None,
            "Could not read index %d twice because it was cleared after "
            "a previous read (perhaps try setting clear_after_read = false?)" %
            index)
      else:
        tensor = self._maybe_zero(index)

    if self._clear_after_read:
      self._tensor_array[index] = None
      self._previously_read_indices.append(index)
    return tensor

  def _write(self, index, value):
    """Writes `value` into index named by `index`.

    Args:
      index: 0-D. int32 scalar with the index to write to.
      value: N-D. Tensor of type `dtype`. The `Tensor` to write to `index`.

    Raises:
      errors_impl.InvalidArgumentError: `value` dtype does not match dtype.
      errors_impl.OutOfRangeError: `index` is out of bounds.
      ValueError: shape of `value` is not consistent with inferred shape.
    """
    if isinstance(index, ops.EagerTensor):
      index = index.numpy()

    if index < 0:
      raise errors_impl.OutOfRangeError(
          None, None,
          "Writing to negative indices (index %d) is not allowed." % index)

    size = len(self._tensor_array)
    if index >= size:
      if not self._dynamic_size:
        raise errors_impl.OutOfRangeError(
            None, None,
            "Tried to write to index %d but array is not resizeable and size "
            "is: %d" % (index, size))
      # Grow the array with empty slots up to and including `index`.
      self._tensor_array.extend([None for _ in range(index - size + 1)])

    if not isinstance(value, ops.EagerTensor):
      value = ops.convert_to_tensor(value)

    if self._infer_shape:
      # First write fixes the element shape; later writes must be compatible.
      if self._element_shape is None:
        self._element_shape = value.shape
      elif not self._element_shape.is_compatible_with(value.shape):
        raise ValueError("Incompatible shape for value (%s), expected (%s)" %
                         (value.shape.as_list(), self._element_shape.as_list()))

    if self._dtype != value.dtype:
      raise errors_impl.InvalidArgumentError(
          None, None,
          "TensorArray dtype is %s but Op is trying to write dtype %s" %
          (self._dtype.name, value.dtype.name))
    self._tensor_array[index] = value

  def write(self, index, value, name=None):
    """See TensorArray."""
    del name  # not meaningful when executing eagerly.
    self._write(index, value)
    return self.parent()

  def _maybe_zero(self, ix):
    # Materialize a zeros tensor for a never-written slot, caching it in
    # place so repeated accesses return the same tensor.
    val = self._tensor_array[ix]
    if val is None:
      val = self._tensor_array[ix] = array_ops.zeros(
          shape=self._element_shape, dtype=self._dtype)
    return val

  def stack(self, name=None):
    """See TensorArray."""
    if self._tensor_array:
      # Fill any unwritten slots with zeros before stacking.
      for ix in range(len(self._tensor_array)):
        self._maybe_zero(ix)
    return ops.convert_to_tensor(
        self._tensor_array, name=name, dtype=self._dtype)

  def gather(self, indices, name=None):
    """See TensorArray."""
    del name  # not meaningful when executing eagerly.
    if isinstance(indices, ops.EagerTensor):
      indices = indices.numpy()
    return array_ops.stack([self._maybe_zero(i) for i in indices])

  def concat(self, name=None):
    """See TensorArray."""
    try:
      return array_ops.concat(
          [self._maybe_zero(ix) for ix in range(len(self._tensor_array))],
          0, name=name)
    except errors_impl.OpError:
      # Reproduce a subset of the error-handling for graph-mode TensorArrays.
      shapes = [t.shape for t in self._tensor_array]
      ndims = [s.ndims for s in shapes]
      if 0 in ndims:
        idx = ndims.index(0)
        raise errors_impl.InvalidArgumentError(
            None, None, "Concat saw a scalar shape at index %d but requires "
            "at least vectors." % idx)
      else:
        raise

  def unstack(self, value, name=None):
    """See TensorArray."""
    tensors = array_ops.unstack(value, name=name)
    if len(tensors) > len(self._tensor_array) and not self._dynamic_size:
      raise ValueError(
          "Cannot unstack %d tensors into a TensorArray of static size %d" %
          (len(tensors), len(self._tensor_array)))
    self._tensor_array = tensors
    return self.parent()

  def scatter(self, indices, value, name=None):
    """See TensorArray."""
    del name  # not meaningful when executing eagerly.
    if isinstance(indices, ops.EagerTensor):
      indices = indices.numpy()
    for index, val in zip(indices, array_ops.unstack(value)):
      self._write(index, val)  # pylint: disable=protected-access
    return self.parent()

  def split(self, value, lengths, name=None):
    """See TensorArray."""
    # error checking to match graph-mode errors
    value = ops.convert_to_tensor(value)
    lengths = ops.convert_to_tensor(lengths)
    sum_lengths = math_ops.reduce_sum(lengths)
    if lengths.shape.ndims != 1:
      raise errors_impl.InvalidArgumentError(
          None, None, "Expected lengths to be a vector, received shape: %s" %
          lengths.shape.as_list())
    elif value.shape.ndims == 0:
      raise errors_impl.InvalidArgumentError(
          None, None, "Expected value to be at least a vector, "
          "but received shape: %s" % value.shape.as_list())
    elif sum_lengths.numpy() != value.shape.as_list()[0]:
      raise errors_impl.InvalidArgumentError(
          None, None, "Expected sum of lengths to be equal to "
          "values.shape[0], but sum of lengths is %d and "
          "value's shape is: %s " % (sum_lengths.numpy(),
                                     value.shape.as_list()))
    elif not self._dynamic_size and lengths.shape[0] != len(self._tensor_array):
      raise errors_impl.InvalidArgumentError(
          None, None, "TensorArray's size is not equal to the size of "
          "lengths (%d vs. %d), and the TensorArray is not marked as "
          "dynamically resizeable" % (len(self._tensor_array),
                                      lengths.shape[0]))
    else:
      self._tensor_array = array_ops.split(value, lengths, name=name)
      return self.parent()

  def size(self, name=None):
    """See TensorArray."""
    del name  # not meaningful when executing eagerly.
    return constant_op.constant(len(self._tensor_array))

  def close(self, name=None):
    del name  # not meaningful when executing eagerly.
    # Drop all element references so their memory can be reclaimed.
    del self._tensor_array[:]
# TensorArray is designed to hide an underlying implementation object
# and as such accesses many of that object's hidden fields.
# pylint: disable=protected-access
@tf_export("TensorArray")
class TensorArray(object):
  """Class wrapping dynamic-sized, per-time-step, write-once Tensor arrays.

  This class is meant to be used with dynamic iteration primitives such as
  `while_loop` and `map_fn`.  It supports gradient back-propagation via special
  "flow" control flow dependencies.

  This class is a thin facade: all work is delegated to one of three
  implementation classes chosen at construction time (see `__init__`).
  """

  def __init__(self,
               dtype,
               size=None,
               dynamic_size=None,
               clear_after_read=None,
               tensor_array_name=None,
               handle=None,
               flow=None,
               infer_shape=True,
               element_shape=None,
               colocate_with_first_write_call=True,
               name=None):
    """Construct a new TensorArray or wrap an existing TensorArray handle.

    A note about the parameter `name`:

    The name of the `TensorArray` (even if passed in) is uniquified: each time
    a new `TensorArray` is created at runtime it is assigned its own name for
    the duration of the run.  This avoids name collisions if a `TensorArray`
    is created within a `while_loop`.

    Args:
      dtype: (required) data type of the TensorArray.
      size: (optional) int32 scalar `Tensor`: the size of the TensorArray.
        Required if handle is not provided.
      dynamic_size: (optional) Python bool: If true, writes to the TensorArray
        can grow the TensorArray past its initial size.  Default: False.
      clear_after_read: Boolean (optional, default: True).  If True, clear
        TensorArray values after reading them.  This disables read-many
        semantics, but allows early release of memory.
      tensor_array_name: (optional) Python string: the name of the TensorArray.
        This is used when creating the TensorArray handle.  If this value is
        set, handle should be None.
      handle: (optional) A `Tensor` handle to an existing TensorArray.  If this
        is set, tensor_array_name should be None.  Only supported in graph mode.
      flow: (optional) A float `Tensor` scalar coming from an existing
        `TensorArray.flow`.  Only supported in graph mode.
      infer_shape: (optional, default: True) If True, shape inference
        is enabled.  In this case, all elements must have the same shape.
      element_shape: (optional, default: None) A `TensorShape` object specifying
        the shape constraints of each of the elements of the TensorArray.
        Need not be fully defined.
      colocate_with_first_write_call: If `True`, the TensorArray will be
        colocated on the same device as the Tensor used on its first write
        (write operations include `write`, `unstack`, and `split`).  If `False`,
        the TensorArray will be placed on the device determined by the
        device context available during its initialization.
      name: A name for the operation (optional).

    Raises:
      ValueError: if both handle and tensor_array_name are provided.
      TypeError: if handle is provided but is not a Tensor.
    """
    # Pick the backing implementation: eager mode uses a Python-list-backed
    # array; graph mode uses TensorLists under control flow v2, otherwise
    # the legacy resource-handle based TensorArray.
    if context.executing_eagerly():
      implementation = _EagerTensorArray
    else:
      if control_flow_util.EnableControlFlowV2(ops.get_default_graph()):
        implementation = _GraphTensorArrayV2
      else:
        implementation = _GraphTensorArray

    self._implementation = implementation(
        dtype,
        size=size,
        dynamic_size=dynamic_size,
        clear_after_read=clear_after_read,
        tensor_array_name=tensor_array_name,
        handle=handle,
        flow=flow,
        infer_shape=infer_shape,
        element_shape=element_shape,
        colocate_with_first_write_call=colocate_with_first_write_call,
        name=name)

    # Weak back-reference so the implementation can return the wrapper
    # (e.g. from write/scatter) without creating a reference cycle.
    self._implementation.parent = weakref.ref(self)

  @property
  def flow(self):
    """The flow `Tensor` forcing ops leading to this TensorArray state."""
    return self._implementation._flow

  @property
  def dtype(self):
    """The data type of this TensorArray."""
    return self._implementation._dtype

  @property
  def handle(self):
    """The reference to the TensorArray."""
    return self._implementation.handle

  @property
  def _dynamic_size(self):
    return self._implementation._dynamic_size

  @property
  def _infer_shape(self):
    return self._implementation._infer_shape

  @_infer_shape.setter
  def _infer_shape(self, infer_shape):
    self._implementation._infer_shape = infer_shape

  @property
  def _element_shape(self):
    return self._implementation._element_shape

  @_element_shape.setter
  def _element_shape(self, element_shape):
    self._implementation._element_shape = element_shape

  @property
  def _colocate_with_first_write_call(self):
    return self._implementation._colocate_with_first_write_call

  @property
  def _colocate_with(self):
    return self._implementation._colocate_with

  @_colocate_with.setter
  def _colocate_with(self, colocate_with):
    self._implementation._colocate_with = colocate_with

  def identity(self):
    """Returns a TensorArray with the same content and properties.

    Returns:
      A new TensorArray object with flow that ensures the control dependencies
      from the contexts will become control dependencies for writes, reads, etc.
      Use this object all for subsequent operations.
    """
    return self._implementation.identity()

  def grad(self, source, flow=None, name=None):
    # Only supported by the legacy graph implementation.
    return self._implementation.grad(source, flow=flow, name=name)

  def read(self, index, name=None):
    """Read the value at location `index` in the TensorArray.

    Args:
      index: 0-D.  int32 tensor with the index to read from.
      name: A name for the operation (optional).

    Returns:
      The tensor at index `index`.
    """
    return self._implementation.read(index, name=name)

  @tf_should_use.should_use_result
  def write(self, index, value, name=None):
    """Write `value` into index `index` of the TensorArray.

    Args:
      index: 0-D.  int32 scalar with the index to write to.
      value: N-D.  Tensor of type `dtype`.  The Tensor to write to this index.
      name: A name for the operation (optional).

    Returns:
      A new TensorArray object with flow that ensures the write occurs.
      Use this object all for subsequent operations.

    Raises:
      ValueError: if there are more writers than specified.
    """
    return self._implementation.write(index, value, name=name)

  def stack(self, name=None):
    """Return the values in the TensorArray as a stacked `Tensor`.

    All of the values must have been written and their shapes must all match.
    If input shapes have rank-`R`, then output shape will have rank-`(R+1)`.

    Args:
      name: A name for the operation (optional).

    Returns:
      All the tensors in the TensorArray stacked into one tensor.
    """
    return self._implementation.stack(name=name)

  def gather(self, indices, name=None):
    """Return selected values in the TensorArray as a packed `Tensor`.

    All of selected values must have been written and their shapes
    must all match.

    Args:
      indices: A `1-D` `Tensor` taking values in `[0, max_value)`.  If
        the `TensorArray` is not dynamic, `max_value=size()`.
      name: A name for the operation (optional).

    Returns:
      The tensors in the `TensorArray` selected by `indices`, packed into one
      tensor.
    """
    return self._implementation.gather(indices, name=name)

  def concat(self, name=None):
    """Return the values in the TensorArray as a concatenated `Tensor`.

    All of the values must have been written, their ranks must match, and
    and their shapes must all match for all dimensions except the first.

    Args:
      name: A name for the operation (optional).

    Returns:
      All the tensors in the TensorArray concatenated into one tensor.
    """
    return self._implementation.concat(name=name)

  @tf_should_use.should_use_result
  def unstack(self, value, name=None):
    """Unstack the values of a `Tensor` in the TensorArray.

    If input value shapes have rank-`R`, then the output TensorArray will
    contain elements whose shapes are rank-`(R-1)`.

    Args:
      value: (N+1)-D.  Tensor of type `dtype`.  The Tensor to unstack.
      name: A name for the operation (optional).

    Returns:
      A new TensorArray object with flow that ensures the unstack occurs.
      Use this object all for subsequent operations.

    Raises:
      ValueError: if the shape inference fails.
    """
    return self._implementation.unstack(value, name=name)

  @tf_should_use.should_use_result
  def scatter(self, indices, value, name=None):
    """Scatter the values of a `Tensor` in specific indices of a `TensorArray`.

    Args:
      indices: A `1-D` `Tensor` taking values in `[0, max_value)`.  If
        the `TensorArray` is not dynamic, `max_value=size()`.
      value: (N+1)-D.  Tensor of type `dtype`.  The Tensor to unpack.
      name: A name for the operation (optional).

    Returns:
      A new TensorArray object with flow that ensures the scatter occurs.
      Use this object all for subsequent operations.

    Raises:
      ValueError: if the shape inference fails.
    """
    return self._implementation.scatter(indices, value, name=name)

  @tf_should_use.should_use_result
  def split(self, value, lengths, name=None):
    """Split the values of a `Tensor` into the TensorArray.

    Args:
      value: (N+1)-D.  Tensor of type `dtype`.  The Tensor to split.
      lengths: 1-D.  int32 vector with the lengths to use when splitting
        `value` along its first dimension.
      name: A name for the operation (optional).

    Returns:
      A new TensorArray object with flow that ensures the split occurs.
      Use this object all for subsequent operations.

    Raises:
      ValueError: if the shape inference fails.
    """
    return self._implementation.split(value, lengths, name=name)

  def size(self, name=None):
    """Return the size of the TensorArray."""
    return self._implementation.size(name=name)

  @tf_should_use.should_use_result
  def close(self, name=None):
    """Close the current TensorArray."""
    return self._implementation.close(name=name)
def build_ta_with_new_flow(old_ta, flow):
  """Builds a TensorArray with a new `flow` tensor.

  Copies the dtype, handle and shape-inference state of `old_ta` into a
  fresh TensorArray whose state is carried by `flow`.
  """
  new_ta = TensorArray(
      dtype=old_ta.dtype,
      dynamic_size=old_ta._dynamic_size,
      handle=old_ta.handle,
      flow=flow,
      infer_shape=old_ta._infer_shape,
      colocate_with_first_write_call=old_ta._colocate_with_first_write_call)
  # Carry over state that the constructor does not accept directly.
  new_ta._colocate_with = old_ta._colocate_with
  new_ta._element_shape = old_ta._element_shape
  return new_ta
# pylint: enable=protected-access
| |
#########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from manager_rest.deployment_update import utils
from manager_rest.storage import models, get_node
from manager_rest.manager_exceptions import UnknownModificationStageError
from manager_rest.deployment_update.constants import ENTITY_TYPES, ACTION_TYPES
# Expected number of segments in each kind of entity id, as returned by
# utils.get_entity_keys.  For example, a node entity id splits into
# [entity_kind, node_id] (2 segments), while a relationship entity id
# splits into at least [entity_kind, source_node_id, relationships_key,
# relationship_index] (4 segments).
OUTPUT_ENTITY_LEN = 2
WORKFLOW_ENTITY_LEN = 2
OPERATION_ENTITY_LEN = 2
PROPERTY_ENTITY_LEN = 2
RELATIONSHIP_ENTITY_LEN = 4
NODE_ENTITY_LEN = 2
class EntityValidatorBase(object):
    """Validates a single deployment-update step for one entity type.

    Subclasses implement `_validate_entity` plus the `_in_old`/`_in_new`
    predicates, which report whether the entity exists in the currently
    deployed blueprint and in the updated deployment plan respectively.
    """

    def __init__(self, sm):
        # Storage manager, available to subclasses for storage lookups.
        self.sm = sm
        # Dispatch table: step action -> matching validation routine.
        self._validation_mapper = {
            ACTION_TYPES.ADD: self._validate_add,
            ACTION_TYPES.MODIFY: self._validate_modify,
            ACTION_TYPES.REMOVE: self._validate_remove
        }

    def validate(self, dep_update, step):
        """Validate `step` against `dep_update`.

        Any UnknownModificationStageError raised by the subclass is
        re-raised with the entity type and id prepended for context.
        """
        try:
            self._validate_entity(dep_update, step)
        except UnknownModificationStageError as e:
            entity_identifier_msg = \
                "Entity type {0} with entity id {1}".format(step.entity_type,
                                                            step.entity_id)
            # Use str(e) rather than e.message: BaseException.message was
            # removed in Python 3 (and deprecated since Python 2.6).
            err_msg = "{0}: {1}".format(entity_identifier_msg, str(e))
            raise UnknownModificationStageError(err_msg)

    def _validate_entity(self, dep_update, step):
        raise NotImplementedError

    def _in_old(self, *args, **kwargs):
        raise NotImplementedError

    def _in_new(self, *args, **kwargs):
        raise NotImplementedError

    def _validate_add(self, entity_id, entity_type, **kwargs):
        # An added entity must exist only in the updated blueprint.
        if not (self._in_new(**kwargs) and not self._in_old(**kwargs)):
            raise UnknownModificationStageError(
                "The entity either doesn't exist in the deployment update "
                "blueprint or exists in the original deployment blueprint")

    def _validate_modify(self, entity_id, entity_type, **kwargs):
        # A modified entity must exist in both blueprints.
        if not (self._in_new(**kwargs) and self._in_old(**kwargs)):
            raise UnknownModificationStageError(
                "The entity either doesn't exist in the deployment update "
                "blueprint or it doesn't exists in the original deployment "
                "blueprint")

    def _validate_remove(self, entity_id, entity_type, **kwargs):
        # A removed entity must exist only in the original blueprint.
        if not (not self._in_new(**kwargs) and self._in_old(**kwargs)):
            raise UnknownModificationStageError(
                "The entity either exists in the deployment update blueprint "
                "or doesn't exists in the original deployment blueprint")

    def _get_storage_node(self, deployment_id, node_id):
        """Return the stored node as a dict, or {} if it does not exist."""
        node = get_node(deployment_id, node_id)
        return node.to_dict() if node else {}
class NodeValidator(EntityValidatorBase):
    """Validates deployment-update steps that target a node entity."""

    def _validate_entity(self, dep_update, step):
        keys = utils.get_entity_keys(step.entity_id)
        if len(keys) != NODE_ENTITY_LEN:
            # Malformed node entity id; nothing to validate.
            return
        node_id = keys[1]
        handler = self._validation_mapper[step.action]
        return handler(step.entity_id,
                       step.entity_type,
                       dep_update=dep_update,
                       node_id=node_id)

    def _in_old(self, dep_update, node_id):
        # Node exists in the currently deployed (storage) blueprint?
        stored = self._get_storage_node(dep_update.deployment_id, node_id)
        return bool(stored)

    def _in_new(self, dep_update, node_id):
        # Node exists in the updated deployment plan?
        return bool(utils.get_raw_node(dep_update.deployment_plan, node_id))
class RelationshipValidator(EntityValidatorBase):
    """Validates deployment-update steps that target a relationship."""

    def _validate_entity(self, dep_update, step):
        entity_keys = utils.get_entity_keys(step.entity_id)
        if len(entity_keys) < RELATIONSHIP_ENTITY_LEN:
            return
        # Keys: [entity_kind, source_node_id, relationships_key, index];
        # an optional fifth key is the index in the old deployment.
        _, source_node_id, relationships, source_relationship_index = \
            entity_keys[:RELATIONSHIP_ENTITY_LEN]
        target_relationship_index = entity_keys[RELATIONSHIP_ENTITY_LEN] \
            if len(entity_keys) > RELATIONSHIP_ENTITY_LEN else None

        # assert the index is indeed readable
        source_relationship_index = utils.parse_index(
            source_relationship_index)
        target_relationship_index = utils.parse_index(
            target_relationship_index)
        # NOTE(review): a parsed index of 0 is falsy, so a step where both
        # indices are 0 silently skips validation here — confirm whether
        # `is None` checks were intended.
        if not (source_relationship_index or target_relationship_index):
            return
        validate = self._validation_mapper[step.action]
        return validate(step.entity_id,
                        step.entity_type,
                        dep_update=dep_update,
                        source_node_id=source_node_id,
                        relationships=relationships,
                        source_relationship_index=source_relationship_index,
                        target_relationship_index=target_relationship_index)

    def _in_new(self,
                dep_update,
                source_node_id,
                relationships,
                source_relationship_index,
                target_relationship_index):
        # Look up the relationship by its index in the *new* deployment plan
        # and check that its target node also exists in the plan.
        source_node = utils.get_raw_node(dep_update.deployment_plan,
                                         source_node_id)
        if not (source_node and
                len(source_node[relationships]) > source_relationship_index):
            return
        target_node_id = \
            source_node[relationships][source_relationship_index]['target_id']
        raw_target_node = utils.get_raw_node(dep_update.deployment_plan,
                                             target_node_id)
        return raw_target_node

    def _in_old(self,
                dep_update,
                source_node_id,
                relationships,
                source_relationship_index,
                target_relationship_index):
        # Look up the relationship by its index in the *stored* deployment
        # (using the target index) and check that its target node exists.
        source_node = self._get_storage_node(dep_update.deployment_id,
                                             source_node_id)
        if not (source_node and
                len(source_node[relationships]) > target_relationship_index):
            return
        target_node_id = \
            source_node[relationships][target_relationship_index]['target_id']
        storage_target_node = self._get_storage_node(dep_update.deployment_id,
                                                     target_node_id)
        return storage_target_node
class PropertyValidator(EntityValidatorBase):
    """Validates steps that point at a property path within a node."""

    def _validate_entity(self, dep_update, step):
        keys = utils.get_entity_keys(step.entity_id)
        if len(keys) < PROPERTY_ENTITY_LEN:
            return
        # Prefix identifies the node; the remainder is the property path.
        _, node_id = keys[:PROPERTY_ENTITY_LEN]
        property_path = keys[PROPERTY_ENTITY_LEN:]
        checker = self._validation_mapper[step.action]
        return checker(step.entity_id,
                       step.entity_type,
                       dep_update=dep_update,
                       node_id=node_id,
                       property_id=property_path)

    @staticmethod
    def _in_new(dep_update, node_id, property_id):
        # The property exists iff the path resolves in the raw (new) node.
        raw_node = utils.get_raw_node(dep_update.deployment_plan, node_id)
        return utils.traverse_object(raw_node, property_id) is not None

    def _in_old(self, dep_update, node_id, property_id):
        # The property exists iff the path resolves in the stored node.
        stored = self._get_storage_node(dep_update.deployment_id, node_id)
        return utils.traverse_object(stored, property_id) is not None
class OperationValidator(EntityValidatorBase):
    """Validates steps that point at an operation path within a node."""

    def _validate_entity(self, dep_update, step):
        keys = utils.get_entity_keys(step.entity_id)
        if len(keys) < OPERATION_ENTITY_LEN:
            return
        # Prefix identifies the node; the remainder is the operation path.
        _, node_id = keys[:OPERATION_ENTITY_LEN]
        operation_path = keys[OPERATION_ENTITY_LEN:]
        checker = self._validation_mapper[step.action]
        return checker(step.entity_id,
                       step.entity_type,
                       dep_update=dep_update,
                       node_id=node_id,
                       operation_id=operation_path)

    def _in_new(self, dep_update, node_id, operation_id):
        # The operation exists iff the path resolves in the raw (new) node.
        raw_node = utils.get_raw_node(dep_update.deployment_plan, node_id)
        return utils.traverse_object(raw_node, operation_id) is not None

    def _in_old(self, dep_update, node_id, operation_id):
        # The operation exists iff the path resolves in the stored node.
        stored = self._get_storage_node(dep_update.deployment_id, node_id)
        return utils.traverse_object(stored, operation_id) is not None
class WorkflowValidator(EntityValidatorBase):
    """Validates steps that point at a workflow of the deployment."""

    def _validate_entity(self, dep_update, step):
        keys = utils.get_entity_keys(step.entity_id)
        if len(keys) < WORKFLOW_ENTITY_LEN:
            return
        # First key is the workflows section; the rest addresses the workflow.
        workflows, workflow_id = keys[0], keys[1:]
        checker = self._validation_mapper[step.action]
        return checker(step.entity_id,
                       step.entity_type,
                       dep_update=dep_update,
                       workflow_id=workflow_id,
                       workflows=workflows)

    @staticmethod
    def _in_new(dep_update, workflow_id, workflows):
        # Resolve the workflow path inside the updated deployment plan.
        raw_workflows = dep_update.deployment_plan[workflows]
        return utils.traverse_object(raw_workflows, workflow_id) is not None

    def _in_old(self, dep_update, workflow_id, workflows):
        # Resolve the workflow path inside the stored deployment model.
        deployment = self.sm.get(models.Deployment, dep_update.deployment_id)
        stored_workflows = deployment.workflows or {}
        return utils.traverse_object(stored_workflows,
                                     workflow_id) is not None
class OutputValidator(EntityValidatorBase):
    """Validates steps that point at an output of the deployment."""

    def _validate_entity(self, dep_update, step):
        keys = utils.get_entity_keys(step.entity_id)
        if len(keys) < OUTPUT_ENTITY_LEN:
            return
        # First key is the outputs section; the rest addresses the output.
        outputs, output_id = keys[0], keys[1:]
        checker = self._validation_mapper[step.action]
        return checker(step.entity_id,
                       step.entity_type,
                       dep_update=dep_update,
                       output_id=output_id,
                       outputs=outputs)

    @staticmethod
    def _in_new(dep_update, output_id, outputs):
        # Resolve the output path inside the updated deployment plan.
        raw_outputs = dep_update.deployment_plan[outputs]
        return utils.traverse_object(raw_outputs, output_id) is not None

    def _in_old(self, dep_update, output_id, outputs):
        # Resolve the output path inside the stored deployment model.
        deployment = self.sm.get(models.Deployment, dep_update.deployment_id)
        stored_outputs = deployment.outputs or {}
        return utils.traverse_object(stored_outputs, output_id) is not None
class DescriptionValidator(EntityValidatorBase):
    """Validates steps that modify the deployment description."""

    def _validate_entity(self, dep_update, step):
        checker = self._validation_mapper[step.action]
        # The entity id itself is the description key in the plan.
        return checker(step.entity_id,
                       step.entity_type,
                       dep_update=dep_update,
                       description_key=step.entity_id)

    def _in_new(self, dep_update, description_key):
        # Truthy iff the updated plan carries a non-empty description.
        return bool(dep_update.deployment_plan[description_key])

    def _in_old(self, dep_update, description_key):
        # Truthy iff the stored deployment carries a non-empty description.
        deployment = self.sm.get(models.Deployment, dep_update.deployment_id)
        return bool(deployment.description or {})
class StepValidator(object):
    """Dispatches a deployment update step to its entity-type validator."""

    def __init__(self, sm):
        # One validator instance per supported entity type, all sharing
        # the same storage manager.
        self._validation_mapper = {
            entity_type: validator_cls(sm)
            for entity_type, validator_cls in (
                (ENTITY_TYPES.NODE, NodeValidator),
                (ENTITY_TYPES.RELATIONSHIP, RelationshipValidator),
                (ENTITY_TYPES.PROPERTY, PropertyValidator),
                (ENTITY_TYPES.OPERATION, OperationValidator),
                (ENTITY_TYPES.WORKFLOW, WorkflowValidator),
                (ENTITY_TYPES.OUTPUT, OutputValidator),
                (ENTITY_TYPES.DESCRIPTION, DescriptionValidator),
            )
        }

    def validate(self, dep_update, step):
        """
        validate an entity id of provided type exists in provided blueprint.
        raises error if id doesn't exist

        :param dep_update: the deployment update object.
        :param step: the deployment update step object
        :return: None
        """
        # Unknown entity types are silently ignored, as before.
        if step.entity_type in ENTITY_TYPES:
            self._validation_mapper[step.entity_type].validate(dep_update,
                                                               step)
| |
import mock
import pytest
import hmac
import hashlib
import lxml.etree
from django.utils import timezone
from osf_tests import factories
from website import settings
@pytest.mark.django_db
class TestCrossRefEmailResponse:
    """Tests for the endpoint that ingests CrossRef's emailed DOI batch
    diagnostics (relayed by Mailgun) and updates preprint identifiers.
    """

    def make_mailgun_payload(self, crossref_response):
        # Wrap a CrossRef XML diagnostic in a Mailgun-shaped POST payload,
        # signed the way the endpoint's HMAC check expects.
        # NOTE(review): hmac.new() is given str key/msg, which only works on
        # Python 2 — confirm bytes encoding before porting to Python 3.
        mailgun_payload = {
            'From': ['CrossRef <admin@crossref.org>'],
            'To': ['test@test.osf.io'],
            'subject': ['CrossRef submission ID: 1390671938'],
            'from': ['CrossRef <test-admin@crossref.org>'],
            'Date': ['Fri, 27 Apr 2018 11:38:00 -0400 (EDT)'],
            'body-plain': [crossref_response.strip()],
            'Mime-Version': ['1.0'],
            'timestamp': '123',
            'recipient': ['test@test.osf.io'],
            'sender': ['test-admin@crossref.org'],
            'Content-Type': [u'text/plain; charset="UTF-8"'],
            'Subject': [u'CrossRef submission ID: 1390671938'],
            'token': 'secret'
        }
        # temporarily override MAILGUN_API_KEY
        settings.MAILGUN_API_KEY = 'notsosecret'
        data = {
            'X-Mailgun-Sscore': 0,
            'signature': hmac.new(
                key=settings.MAILGUN_API_KEY,
                msg='{}{}'.format(
                    mailgun_payload['timestamp'],
                    mailgun_payload['token']
                ),
                digestmod=hashlib.sha256,
            ).hexdigest(),
        }
        data.update(mailgun_payload)
        # Drop any None values so the form-encoded POST stays well-formed.
        data = {
            key: value
            for key, value in data.items()
            if value is not None
        }
        return data

    @pytest.fixture()
    def preprint(self):
        # A preprint with no DOI so tests can observe DOI assignment.
        return factories.PreprintFactory(set_doi=False)

    @pytest.fixture()
    def error_xml(self, preprint):
        # CrossRef diagnostic for a failed submission (no DOI minted).
        return """
        <?xml version="1.0" encoding="UTF-8"?>
        <doi_batch_diagnostic status="completed" sp="cs3.crossref.org">
           <submission_id>1390675109</submission_id>
           <batch_id>{}</batch_id>
           <record_diagnostic status="Failure">
              <doi />
              <msg>Error: cvc-complex-type.2.4.a: Invalid content was found starting with element 'program'</msg>
           </record_diagnostic>
           <batch_data>
              <record_count>1</record_count>
              <success_count>0</success_count>
              <warning_count>0</warning_count>
              <failure_count>1</failure_count>
           </batch_data>
        </doi_batch_diagnostic>
        """.format(preprint._id)

    @pytest.fixture()
    def success_xml(self, preprint):
        # CrossRef diagnostic for a successful first-time DOI registration.
        return """
        <?xml version="1.0" encoding="UTF-8"?>
        <doi_batch_diagnostic status="completed" sp="cs3.crossref.org">
           <submission_id>1390675475</submission_id>
           <batch_id>{}</batch_id>
           <record_diagnostic status="Success">
              <doi>10.31219/FK2OSF.IO/{}</doi>
              <msg>Successfully added</msg>
           </record_diagnostic>
           <batch_data>
              <record_count>1</record_count>
              <success_count>1</success_count>
              <warning_count>0</warning_count>
              <failure_count>0</failure_count>
           </batch_data>
        </doi_batch_diagnostic>
        """.format(preprint._id, preprint._id)

    @pytest.fixture()
    def update_success_xml(self, preprint):
        # CrossRef diagnostic for a successful DOI metadata *update*.
        return """
        <?xml version="1.0" encoding="UTF-8"?>
        <doi_batch_diagnostic status="completed" sp="cs3.crossref.org">
           <submission_id>1390757455</submission_id>
           <batch_id>{}</batch_id>
           <record_diagnostic status="Success">
              <doi>10.31219/FK2osf.io/{}</doi>
              <msg>Successfully updated</msg>
           </record_diagnostic>
           <batch_data>
              <record_count>1</record_count>
              <success_count>1</success_count>
              <warning_count>0</warning_count>
              <failure_count>0</failure_count>
           </batch_data>
        </doi_batch_diagnostic>
        """.format(preprint._id, preprint._id)

    def build_batch_success_xml(self, preprint_list):
        # Build a batch diagnostic with one Success record per preprint.
        preprint_count = len(preprint_list)
        base_xml_string = """
        <?xml version="1.0" encoding="UTF-8"?>
        <doi_batch_diagnostic status="completed" sp="cs3.crossref.org">
           <submission_id>1390758391</submission_id>
           <batch_id>1528233706</batch_id>
           <batch_data>
              <record_count>{}</record_count>
              <success_count>{}</success_count>
              <warning_count>0</warning_count>
              <failure_count>0</failure_count>
           </batch_data>
        </doi_batch_diagnostic>
        """.format(preprint_count, preprint_count)
        base_xml = lxml.etree.fromstring(base_xml_string.strip())
        # All preprints are assumed to share the first one's provider prefix.
        provider_prefix = preprint_list[0].provider.doi_prefix
        for preprint in preprint_list:
            record_diagnostic = lxml.etree.Element('record_diagnostic')
            record_diagnostic.attrib['status'] = 'Success'
            doi = lxml.etree.Element('doi')
            doi.text = settings.DOI_FORMAT.format(prefix=provider_prefix, guid=preprint._id)
            msg = lxml.etree.Element('msg')
            msg.text = 'Successfully added'
            record_diagnostic.append(doi)
            record_diagnostic.append(msg)
            base_xml.append(record_diagnostic)
        return lxml.etree.tostring(base_xml, pretty_print=False)

    @pytest.fixture()
    def url(self):
        # Endpoint under test.
        return '/_/crossref/email/'

    def test_wrong_request_context_raises_permission_error(self, app, url, error_xml):
        # A payload missing its Mailgun signature must be rejected.
        mailgun_response = self.make_mailgun_payload(error_xml)
        mailgun_response.pop('signature')
        response = app.post(url, mailgun_response, expect_errors=True)
        assert response.status_code == 400

    def test_error_response_sends_message_does_not_set_doi(self, app, url, preprint, error_xml):
        # Failure diagnostics notify admins and leave the DOI unset.
        assert not preprint.get_identifier_value('doi')
        with mock.patch('framework.auth.views.mails.send_mail') as mock_send_mail:
            context_data = self.make_mailgun_payload(crossref_response=error_xml)
            app.post(url, context_data)
        assert mock_send_mail.called
        assert not preprint.get_identifier_value('doi')

    def test_success_response_sets_doi(self, app, url, preprint, success_xml):
        # Success diagnostics set the DOI and preprint_doi_created, no email.
        assert not preprint.get_identifier_value('doi')
        with mock.patch('framework.auth.views.mails.send_mail') as mock_send_mail:
            context_data = self.make_mailgun_payload(crossref_response=success_xml)
            app.post(url, context_data)
        preprint.reload()
        assert not mock_send_mail.called
        assert preprint.get_identifier_value('doi')
        assert preprint.preprint_doi_created

    def test_update_success_response(self, app, preprint, url, update_success_xml):
        # An "update" diagnostic replaces a previously set DOI value.
        # NOTE(review): self.update_success_xml(preprint) calls the
        # fixture-decorated function directly — deprecated/disallowed in
        # modern pytest; confirm the pinned pytest version allows it.
        initial_value = 'TempDOIValue'
        preprint.set_identifier_value(category='doi', value=initial_value)
        update_xml = self.update_success_xml(preprint)
        with mock.patch('framework.auth.views.mails.send_mail') as mock_send_mail:
            context_data = self.make_mailgun_payload(crossref_response=update_xml)
            app.post(url, context_data)
        assert not mock_send_mail.called
        assert preprint.get_identifier_value(category='doi') != initial_value

    def test_update_success_does_not_set_preprint_doi_created(self, app, preprint, url, update_success_xml):
        # Updating an existing DOI must not overwrite the creation timestamp.
        preprint.set_identifier_value(category='doi', value='test')
        preprint.preprint_doi_created = timezone.now()
        preprint.save()
        update_xml = self.update_success_xml(preprint)
        pre_created = preprint.preprint_doi_created
        with mock.patch('framework.auth.views.mails.send_mail'):
            context_data = self.make_mailgun_payload(crossref_response=update_xml)
            app.post(url, context_data)
        assert preprint.preprint_doi_created == pre_created

    def test_success_batch_response(self, app, url):
        # A batch diagnostic sets a DOI on every preprint it covers.
        provider = factories.PreprintProviderFactory()
        provider.doi_prefix = '10.123yeah'
        provider.save()
        preprint_list = [factories.PreprintFactory(set_doi=False, provider=provider) for _ in range(5)]
        xml_response = self.build_batch_success_xml(preprint_list)
        context_data = self.make_mailgun_payload(xml_response)
        app.post(url, context_data)
        for preprint in preprint_list:
            assert preprint.get_identifier_value('doi') == settings.DOI_FORMAT.format(prefix=provider.doi_prefix, guid=preprint._id)

    def test_confirmation_marks_legacy_doi_as_deleted(self, app, url, preprint, update_success_xml):
        # Confirming the new DOI marks any legacy DOI identifier deleted.
        legacy_value = 'IAmALegacyDOI'
        preprint.set_identifier_value(category='legacy_doi', value=legacy_value)
        update_xml = self.update_success_xml(preprint)
        with mock.patch('framework.auth.views.mails.send_mail') as mock_send_mail:
            context_data = self.make_mailgun_payload(crossref_response=update_xml)
            app.post(url, context_data)
        assert not mock_send_mail.called
        assert preprint.identifiers.get(category='legacy_doi').deleted
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for TensorFlow's "Eager" mode of execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from autograd import core as ag_core
import numpy as np
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import tape
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops as tf_ops
from tensorflow.python.framework import tensor_shape
# TODO(agarwal): rename to TensorHandle.
class Tensor(tf_ops.Tensor):
  """A TensorFlow Eager Tensor.

  Wraps a TFE_TensorHandle (self._handle); most operations delegate to the
  pywrap_tensorflow C layer. Graph-only members of tf_ops.Tensor (op, graph,
  name, ...) are explicitly unsupported.
  """

  def __init__(self, value, dtype=None):
    """Creates a Tensor object from a Python object or numpy array.

    May share storage with the numpy array, in which case changes to the numpy
    object will reflect
    in the Tensor.

    Arguments:
      value: A numpy.array or a Python object to create a Tensor for.
      dtype: TensorFlow dtype for the returned Tensor. If None, one will be
        automatically selected.
    """
    # TODO(ashankar): Evaluate if we can and perhaps share code with
    # tf.constant defined in
    # https://www.tensorflow.org/code/tensorflow/python/framework/constant_op.py
    self._id = tf_ops.uid()
    if not isinstance(value, np.ndarray):
      # Convert arbitrary Python objects via numpy first.
      npt = None if dtype is None else dtype.as_numpy_dtype
      value = np.array(value, dtype=npt)
      if dtype is None:
        # No dtype requested: steer numpy's int64/float64 defaults towards
        # int32/float32 (see _maybe_modify_numpy_dtype_determination).
        value = _maybe_modify_numpy_dtype_determination(value)
    elif dtype is not None:
      # Already an ndarray: cast only when the requested dtype differs.
      npt = dtype.as_numpy_dtype
      if npt != value.dtype:
        value = value.astype(npt)
    try:
      # C-contiguous layout is required by the numpy->handle conversion.
      value = np.asarray(value, order="C")
      self._handle = pywrap_tensorflow.TFE_Py_NumpyToTensorHandle(value)
    except core._NotOkStatusException as e:  # pylint: disable=protected-access
      raise core._status_to_exception(e.code, e.message)  # pylint: disable=protected-access

    # Almost all TensorFlow kernels for GPU devices keep int32 tensors in host
    # memory. This change approximates the same behavior for eager execution -
    # keeping int32 tensors in host memory.
    #
    # We do so to preclude the need for callers into such kernels from having to
    # explicitly place the int32 tensors in host memory. For example, prior to
    # this change one needed:
    #
    # with tfe.device('/gpu:0'):
    #   ...  # code here
    #   with tfe.device('/cpu:0'):
    #     shape = tfe.Tensor(...)
    #   y = tfe.ops.random_uniform(.., shape)
    #
    # Without the CPU device block tfe.ops.random_uniform would fail since the
    # kernel expects the shape in host memory.
    #
    # After this change, we simplify the code:
    #
    # with tfe.device('/gpu:0'):
    #   y = tfe.ops.random_uniform(, tfe.Tensor(...))
    #
    # The approximation is not exact since if there are GPU kernels which do not
    # require host memory for int32 tensors, there will be a discrepancy between
    # eager execution and TensorFlow graphs. However, as of July 2017, there
    # were no known GPU kernels that kept int32 tensors in device memory.
    if _in_gpu_device() and value.dtype != np.int32:
      ctx = context.get_default_context()
      # pylint: disable=protected-access
      device_name = ctx.device_name
      # NOTE(review): the CPU-side handle is overwritten here without an
      # explicit TFE_DeleteTensorHandle — confirm the copy takes ownership.
      with errors.raise_exception_on_not_ok_status() as status:
        self._handle = pywrap_tensorflow.TFE_TensorHandleCopyToDevice(
            self._handle, ctx._handle, device_name, status)
      # pylint: enable=protected-access
    self._dtype = dtypes.as_dtype(
        pywrap_tensorflow.TFE_TensorHandleDataType(self._handle))

    # This mirrors tensorflow.core.framework.ops.Tensor._handle_data Which will
    # be None for tensors of type other than DT_RESOURCE. For DT_RESOURCE
    # tensors, this will contain a serialized HandleData proto with shape
    # inference metadata about shapes and dtypes of resources accessible from
    # this handle.
    self._handle_data = None
    if core.active_trace() is not None:
      core.active_trace().record_tensor("MANUAL",
                                        tape.tensor_id(self),
                                        self.device,
                                        self.shape.num_elements())

  def __del__(self):
    # Guard against interpreter-shutdown teardown where module globals may
    # already be None.
    if (pywrap_tensorflow is not None
        and pywrap_tensorflow.TFE_DeleteTensorHandle is not None):
      pywrap_tensorflow.TFE_DeleteTensorHandle(self._handle)
    if core.active_trace() is not None:
      core.active_trace().delete_tensor(tape.tensor_id(self))

  def __str__(self):
    """Short human-readable form; elides contents beyond 5 elements."""
    if self.dtype.is_numpy_compatible and self.shape.num_elements() > 0:
      n = self.numpy().reshape(-1)
      if self.shape.num_elements() > 5:
        return "tfe.Tensor(%s..., shape=%s, dtype=%s)" % (n[:5], self.shape,
                                                          self.dtype.name)
      else:
        return "tfe.Tensor(%s, dtype=%s)" % (
            np.array_str(self.numpy()).replace("\n", ""), self.dtype.name)
    return "tfe.Tensor(<unprintable>, shape=%s dtype=%s)" % (self.shape,
                                                             self.dtype.name)

  def __repr__(self):
    """Debug form: id, shape, dtype, plus elided contents with min/max."""
    if self.dtype.is_numpy_compatible and self.shape.num_elements() > 0:
      n = self.numpy()
      # TODO(apassos): understand why self.numpy() sometimes returns not
      # an array.
      if isinstance(n, np.ndarray):
        n = n.reshape(-1)
      if self.shape.num_elements() > 5:
        return "<tfe.Tensor at %s shape=%s dtype=%s>(%s..., min=%s, max=%s)" % (
            self._id, self.shape, self.dtype.name, n[:5], np.min(n), np.max(n))
      else:
        return "<tfe.Tensor at %s shape=%s dtype=%s>(%s)" % (self._id,
                                                             self.shape,
                                                             self.dtype.name, n)
    return "<tfe.Tensor at %s shape=%s dtype=%s>" % (self._id, self.shape,
                                                     self.dtype.name)

  @staticmethod
  def _override_operator(name, func):
    # Used to install operator overloads (e.g. __add__) on the class.
    setattr(Tensor, name, func)

  def numpy(self):
    """Returns a numpy array with the same contents as the Tensor.

    The contents of the Tensor must be backed by host memory. The
    as_cpu_tensor() method can be used ensure that this is true.

    TODO(ashankar,agarwal): Perhaps this should NOT reference the underlying
    buffer but instead always explicitly copy? Note that currently it may or may
    not copy based on whether the numpy data is properly aligned or not.

    Returns:
      A numpy array that may share memory with the Tensor object. Any changes
      to one may be reflected in the other.
    """
    # TODO(ashankar): This with status business seems expensive. Profile/avoid?
    cpu = self.as_cpu_tensor()
    with errors.raise_exception_on_not_ok_status() as status:
      return pywrap_tensorflow.TFE_Py_TensorHandleToNumpy(cpu._handle, status)  # pylint: disable=protected-access

  def _copy(self, ctx, device_name):
    """Copies tensor to dest device."""
    # pylint: disable=protected-access
    # Creates a new tensor on the dest device.
    with errors.raise_exception_on_not_ok_status() as status:
      h = pywrap_tensorflow.TFE_TensorHandleCopyToDevice(
          self._handle, ctx._handle, device_name, status)
    new_tensor = _tensor_from_handle(h)
    if core.active_trace() is not None:
      core.active_trace().record_tensor("COPY",
                                        tape.tensor_id(new_tensor),
                                        new_tensor.device,
                                        new_tensor.shape.num_elements())
    return new_tensor
    # pylint: enable=protected-access

  @property
  def device(self):
    # Device name string reported by the underlying handle.
    return pywrap_tensorflow.TFE_TensorHandleDeviceName(self._handle)

  @property
  def dtype(self):
    # Cached at construction from the handle's data type.
    return self._dtype

  @property
  def shape(self):
    """The shape of this Tensor as a TensorShape object."""
    n = pywrap_tensorflow.TFE_TensorHandleNumDims(self._handle)
    # As of May 2017, TFE_TensorHandle objects were always backed by concrete
    # tensors (which have a valid, known shape).  There were vague plans to
    # change this so that the Tensor class can also represent Tensors that have
    # not yet been computed.
    # If that happens, handle that (e.g., if n < 0: return tensor_shape(None))
    # and also handle -1s returned by TFE_TensorHandleDim.
    assert n >= 0, "See comment in source code"
    return tensor_shape.TensorShape(
        [pywrap_tensorflow.TFE_TensorHandleDim(self._handle, x)
         for x in range(n)])

  def get_shape(self):
    """Alias of Tensor.shape."""
    return self.shape

  def _shape_tuple(self):
    """The shape of this Tensor, as a tuple.

    This is more performant than tuple(shape().as_list()) as it avoids
    two list and one object creation. Marked private for now as from an API
    perspective, it would be better to have a single performant way of
    getting a shape rather than exposing shape() and shape_tuple()
    (and heaven forbid, shape_list() etc. as well!). Punting on that for now,
    but ideally one would work things out and remove the need for this method.
    """
    n = pywrap_tensorflow.TFE_TensorHandleNumDims(self._handle)
    # As of May 2017, TFE_TensorHandle objects were always backed by concrete
    # tensors (which have a valid, known shape).  There were vague plans to
    # change this so that the Tensor class can also represent Tensors that have
    # not yet been computed.
    # If that happens, handle that (e.g., if n < 0: return tensor_shape(None))
    # and also handle -1s returned by TFE_TensorHandleDim.
    assert n >= 0, "See comment in source code"
    return tuple(
        pywrap_tensorflow.TFE_TensorHandleDim(self._handle, x)
        for x in range(n))

  def _shape_as_list(self):
    """The shape of the tensor as a list."""
    return list(self._shape_tuple())

  def as_cpu_tensor(self):
    """A copy of this Tensor with contents backed by host memory."""
    return self._copy(context.get_default_context(), "CPU:0")

  def as_gpu_tensor(self, gpu_index=0):
    """A copy of this Tensor with contents backed by memory on the GPU.

    Arguments:
      gpu_index: Identifies which GPU to place the contents on the returned
        Tensor in.

    Returns:
      A GPU-memory backed Tensor object initialized with the same contents
      as this Tensor.
    """
    return self._copy(context.get_default_context(), "GPU:" + str(gpu_index))

  def __bool__(self):
    # Only scalar boolean tensors may be used in a boolean context.
    if self._shape_tuple() != ():  # pylint: disable=g-explicit-bool-comparison
      raise ValueError(
          "Non-scalar tensor %s cannot be converted to boolean." % repr(self))
    if self.dtype != dtypes.bool:
      raise ValueError(
          "Non-boolean tensor %s cannot be converted to boolean." % repr(self))
    return bool(self.as_cpu_tensor().numpy())

  def __nonzero__(self):
    # Python 2 alias for __bool__.
    return self.__bool__()

  # Methods not supported / implemented for Eager Tensors.
  @property
  def op(self):
    raise NotImplementedError("op not supported for Eager Tensors.")

  @property
  def graph(self):
    raise NotImplementedError("graph not supported for Eager Tensors.")

  @property
  def name(self):
    raise NotImplementedError("name not supported for Eager Tensors.")

  def set_shape(self, shape):
    raise NotImplementedError("set_shape not supported for Eager Tensors.")

  @property
  def value_index(self):
    raise NotImplementedError("value_index not supported for Eager Tensors.")

  def consumers(self):
    raise NotImplementedError("consumers not supported for Eager Tensors.")

  def _add_consumer(self, consumer):
    raise NotImplementedError("_add_consumer not supported for Eager Tensors.")

  def _as_node_def_input(self):
    raise NotImplementedError(
        "_as_node_def_input not supported for Eager Tensors.")

  def _as_tf_output(self):
    raise NotImplementedError("_as_tf_output not supported for Eager Tensors.")

  def eval(self, feed_dict=None, session=None):
    raise NotImplementedError("eval not supported for Eager Tensors.")
class IndexedSlices(object):
  """A sparse representation of a set of tensor slices at given indices.

  This class is a simple wrapper for a pair of `Tensor` objects:

  * `values`: A `Tensor` of any dtype with shape `[D0, D1, ..., Dn]`.
  * `indices`: A 1-D integer `Tensor` with shape `[D0]`.

  An `IndexedSlices` is typically used to represent a subset of a larger
  tensor `dense` of shape `[LARGE0, D1, .. , DN]` where `LARGE0 >> D0`.
  The values in `indices` are the indices in the first dimension of
  the slices that have been extracted from the larger tensor.

  The dense tensor `dense` represented by an `IndexedSlices` `slices` has

  ```python
  dense[slices.indices[i], :, :, :, ...] = slices.values[i, :, :, :, ...]
  ```

  The `IndexedSlices` class is used principally in the definition of
  gradients for operations that have sparse gradients
  (e.g. @{tf.gather}).
  """

  def __init__(self, values, indices, dense_shape):
    """Creates an `IndexedSlices`."""
    # One index per slice row is required.
    assert indices.shape[0] == values.shape[0]
    self._values = values
    self._indices = indices
    self._dense_shape = dense_shape

  @property
  def values(self):
    """A `Tensor` containing the values of the slices."""
    return self._values

  @property
  def indices(self):
    """A 1-D `Tensor` containing the indices of the slices."""
    return self._indices

  @property
  def dense_shape(self):
    """A 1-D `Tensor` containing the shape of the corresponding dense tensor."""
    return self._dense_shape
class _Op(object):
"""Fake op for _LazyZero to make its python API tf.Tensor-like."""
def __init__(self):
self.type = "Zeros"
class LazyZero(object):
"""Lazily-instantiated zero-valued Tensor used as autograd accumulator."""
def __init__(self, shape, dtype):
self.shape = shape
self.dtype = dtype
self.op = _Op()
def __add__(self, other):
return other
def __radd__(self, other):
return other
def numpy(self):
return np.zeros(self.shape, self.dtype)
def convert_to_eager_tensor(t, dtype=None):
  """Return `t` as an eager Tensor, wrapping it in one if necessary."""
  if isinstance(ag_core.getval(t), Tensor):
    # Already an eager Tensor: only verify the dtype matches, don't copy.
    if dtype is not None and t.dtype != dtype:
      raise TypeError("Expected tensor with type %r not %r" % (dtype, t.dtype))
    return t
  return Tensor(t, dtype=dtype)
def convert_n_to_eager_tensor(values, dtype):
  """Convert each element of `values` to an eager Tensor of `dtype`."""
  return [convert_to_eager_tensor(value, dtype) for value in values]
def _tensor_from_handle(handle):
  """'Private' constructor for the Tensor object.

  The existence of a 'handle' is an implementation detail that should be hidden
  from users of this module. Functions within this module do need to create a
  Tensor object from a handle though.

  One option would be to have an __init__(self, handle) method on the
  Tensor class, but that would make the existence and use of a handle
  'public'.

  Instead, this function avoids exposing a Tensor.__init__ that understands
  handles and yet allows functions within this module to create Tensor
  objects from a handle.

  Arguments:
    handle: A valid TFE_TensorHandle object.

  Returns:
    A Tensor object.
  """
  # pylint: disable=protected-access
  # Bypass Tensor.__init__ (which would convert a value) and populate the
  # instance fields directly from the existing handle.
  tensor = Tensor.__new__(Tensor)
  tensor._id = tf_ops.uid()
  tensor._handle = handle
  tensor._dtype = dtypes.as_dtype(
      pywrap_tensorflow.TFE_TensorHandleDataType(handle))
  tensor._handle_data = None
  return tensor
  # pylint: enable=protected-access
# TODO(ashankar): use actual device type.
def _in_gpu_device():
return context.get_default_context()._device_index > 0 # pylint: disable=protected-access
def _maybe_modify_numpy_dtype_determination(np_array):
"""Tweak numpy dtype determination.
numpy prefers int64 and float64, we prefer int32 and float32.
(int32 is often used as the "shape" input to various operations,
many of which only support int32 shapes).
This preference is copied from tensor_util.make_tensor_proto
(https://goto.google.com/numpy_prefs_156503903)
Args:
np_array: A numpy ndarray
Returns:
A numpy ndarray whose dtype may have been modified.
"""
if np_array.dtype == np.float64:
return np_array.astype(np.float32)
if np_array.dtype == np.int64:
# Downcast iff there is no precision loss.
downcasted = np_array.astype(np.int32)
if np.array_equal(downcasted, np_array):
return downcasted
return np_array
| |
################################################################################
# The Neural Network (NN) based Speech Synthesis System
# https://github.com/CSTR-Edinburgh/merlin
#
# Centre for Speech Technology Research
# University of Edinburgh, UK
# Copyright (c) 2014-2015
# All Rights Reserved.
#
# The system as a whole and most of the files in it are distributed
# under the following copyright and conditions
#
# Permission is hereby granted, free of charge, to use and distribute
# this software and its documentation without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this work, and to
# permit persons to whom this work is furnished to do so, subject to
# the following conditions:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# - The authors' names may not be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THE UNIVERSITY OF EDINBURGH AND THE CONTRIBUTORS TO THIS WORK
# DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT
# SHALL THE UNIVERSITY OF EDINBURGH NOR THE CONTRIBUTORS BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
# THIS SOFTWARE.
################################################################################
import os
import sys
import time
import tensorflow as tf
from tensorflow_lib import configuration
from tensorflow_lib import data_utils
from tensorflow_lib.train import TrainTensorflowModels,Train_Encoder_Decoder_Models
class TensorflowClass(object):
    def __init__(self, cfg):
        """Set up data paths, model hyper-parameters and the model object.

        :param cfg: parsed configuration object carrying all user settings
            (feature dirs/dims, normalisation, model topology, file lists,
            and which pipeline stages to run).
        """
        ###################################################
        ########## User configurable variables ############
        ###################################################

        inp_feat_dir = cfg.inp_feat_dir
        out_feat_dir = cfg.out_feat_dir
        pred_feat_dir = cfg.pred_feat_dir

        inp_file_ext = cfg.inp_file_ext
        out_file_ext = cfg.out_file_ext

        ### Input-Output ###
        self.inp_dim = cfg.inp_dim
        self.out_dim = cfg.out_dim

        self.inp_norm = cfg.inp_norm
        self.out_norm = cfg.out_norm

        self.inp_stats_file = cfg.inp_stats_file
        self.out_stats_file = cfg.out_stats_file

        # Scalers are filled in later by normlize_data().
        self.inp_scaler = None
        self.out_scaler = None

        #### define model params ####
        self.hidden_layer_type = cfg.hidden_layer_type
        self.hidden_layer_size = cfg.hidden_layer_size

        self.sequential_training = cfg.sequential_training
        self.encoder_decoder = cfg.encoder_decoder
        self.attention = cfg.attention
        self.cbhg = cfg.cbhg
        self.batch_size = cfg.batch_size
        self.shuffle_data = cfg.shuffle_data
        self.output_layer_type = cfg.output_layer_type
        self.loss_function = cfg.loss_function
        self.optimizer = cfg.optimizer
        self.rnn_params = cfg.rnn_params
        self.dropout_rate = cfg.dropout_rate
        self.num_of_epochs = cfg.num_of_epochs

        ### Define the work directory###
        self.model_dir = cfg.model_dir

        ### define train, valid, test ###
        train_file_number = cfg.train_file_number
        valid_file_number = cfg.valid_file_number
        test_file_number = cfg.test_file_number

        file_id_scp = cfg.file_id_scp
        test_id_scp = cfg.test_id_scp

        #### main processess ####
        self.NORMDATA = cfg.NORMDATA
        self.TRAINMODEL = cfg.TRAINMODEL
        self.TESTMODEL = cfg.TESTMODEL

        #### Generate only test list ####
        self.GenTestList = cfg.GenTestList

        ###################################################
        ####### End of user-defined conf variables ########
        ###################################################

        #### Create train, valid and test file lists ####
        # The scp file is partitioned positionally: first the training ids,
        # then validation, then test. "valid_test" covers valid + test and is
        # what TESTMODEL actually evaluates on.
        file_id_list = data_utils.read_file_list(file_id_scp)

        train_id_list = file_id_list[0: train_file_number]
        valid_id_list = file_id_list[train_file_number: train_file_number + valid_file_number]
        test_id_list = file_id_list[train_file_number + valid_file_number: train_file_number + valid_file_number + test_file_number]

        valid_test_id_list = file_id_list[train_file_number: train_file_number + valid_file_number + test_file_number]

        self.inp_train_file_list = data_utils.prepare_file_path_list(train_id_list, inp_feat_dir, inp_file_ext)
        self.out_train_file_list = data_utils.prepare_file_path_list(train_id_list, out_feat_dir, out_file_ext)

        self.inp_valid_file_list = data_utils.prepare_file_path_list(valid_id_list, inp_feat_dir, inp_file_ext)
        self.out_valid_file_list = data_utils.prepare_file_path_list(valid_id_list, out_feat_dir, out_file_ext)

        self.inp_test_file_list = data_utils.prepare_file_path_list(valid_test_id_list, inp_feat_dir, inp_file_ext)
        self.out_test_file_list = data_utils.prepare_file_path_list(valid_test_id_list, out_feat_dir, out_file_ext)

        self.gen_test_file_list = data_utils.prepare_file_path_list(valid_test_id_list, pred_feat_dir, out_file_ext)

        if self.GenTestList:
            # A separate scp overrides the test set for generation-only runs.
            test_id_list = data_utils.read_file_list(test_id_scp)
            self.inp_test_file_list = data_utils.prepare_file_path_list(test_id_list, inp_feat_dir, inp_file_ext)
            self.gen_test_file_list = data_utils.prepare_file_path_list(test_id_list, pred_feat_dir, out_file_ext)

        # Plain feed-forward/recurrent stack vs. full encoder-decoder model.
        if not self.encoder_decoder:
            self.tensorflow_models = TrainTensorflowModels(self.inp_dim, self.hidden_layer_size, self.out_dim, self.hidden_layer_type, self.model_dir,
                                                           output_type=self.output_layer_type, dropout_rate=self.dropout_rate,
                                                           loss_function=self.loss_function, optimizer=self.optimizer)
        else:
            self.encoder_decoder_models = Train_Encoder_Decoder_Models(self.inp_dim,self.hidden_layer_size,self.out_dim,self.hidden_layer_type,output_type=self.output_layer_type,\
                                                                       dropout_rate=self.dropout_rate,loss_function=self.loss_function,optimizer=self.optimizer,\
                                                                       attention=self.attention,cbhg=self.cbhg)
def normlize_data(self):
    """Prepare the input/output feature normalizers.

    If precomputed stats files already exist on disk they are loaded;
    otherwise the training data is read and the stats are computed (and
    written to the stats files by compute_norm_stats).  In both cases the
    resulting scalers are stored on self for use by the train/test methods.
    """
    ### normalize train data ###
    if os.path.isfile(self.inp_stats_file) and os.path.isfile(self.out_stats_file):
        self.inp_scaler = data_utils.load_norm_stats(self.inp_stats_file, self.inp_dim, method=self.inp_norm)
        self.out_scaler = data_utils.load_norm_stats(self.out_stats_file, self.out_dim, method=self.out_norm)
    else:
        print('preparing train_x, train_y from input and output feature files...')
        train_x, train_y, train_flen = data_utils.read_data_from_file_list(self.inp_train_file_list, self.out_train_file_list,\
            self.inp_dim, self.out_dim, sequential_training=True if self.sequential_training or self.encoder_decoder else False)
        print('computing norm stats for train_x...')
        # Bug fix: the computed scalers were previously bound to locals and
        # discarded, leaving self.inp_scaler/self.out_scaler unset whenever the
        # stats files did not already exist (train_tensorflow_model and
        # test_tensorflow_model both read self.inp_scaler/self.out_scaler).
        self.inp_scaler = data_utils.compute_norm_stats(train_x, self.inp_stats_file, method=self.inp_norm)
        print('computing norm stats for train_y...')
        self.out_scaler = data_utils.compute_norm_stats(train_y, self.out_stats_file, method=self.out_norm)
def train_tensorflow_model(self):
    """Load and normalize the training data, build the configured network
    (feedforward, sequence, or encoder-decoder) and train it."""
    print('preparing train_x, train_y from input and output feature files...')
    #### load the data ####
    train_x, train_y, train_flen = data_utils.read_data_from_file_list(self.inp_train_file_list, self.out_train_file_list,
        self.inp_dim, self.out_dim, sequential_training=True if self.sequential_training or self.encoder_decoder else False)
    #### normalize the data ####
    # norm_data mutates train_x/train_y in place using the scalers prepared
    # by normlize_data().
    data_utils.norm_data(train_x, self.inp_scaler, sequential_training=True if self.sequential_training or self.encoder_decoder else False)
    data_utils.norm_data(train_y, self.out_scaler, sequential_training=True if self.sequential_training or self.encoder_decoder else False)
    #### define the model ####
    if self.sequential_training:
        # utt2framenum maps utterance id -> frame count; the longest
        # utterance fixes the maximum unrolled sequence length.
        utt_length=train_flen["utt2framenum"].values()
        self.tensorflow_models.get_max_step(max(utt_length))
        self.tensorflow_models.define_sequence_model()
    elif self.encoder_decoder:
        utt_length=train_flen["utt2framenum"].values()
        # NOTE(review): sets max_step on the encoder-decoder model through the
        # parent class __setattr__ — presumably to bypass an overridden
        # __setattr__ on Train_Encoder_Decoder_Models; confirm.
        super(Train_Encoder_Decoder_Models,self.encoder_decoder_models).__setattr__("max_step",max(utt_length))
        self.encoder_decoder_models.define_encoder_decoder()
    else:
        self.tensorflow_models.define_feedforward_model()
    #### train the model ####
    print('training...')
    if self.sequential_training:
        ### Train feedforward model ###
        self.tensorflow_models.train_sequence_model(train_x, train_y, batch_size=self.batch_size, num_of_epochs=self.num_of_epochs, shuffle_data=self.shuffle_data,utt_length=utt_length)
    elif self.encoder_decoder:
        self.encoder_decoder_models.train_encoder_decoder_model(train_x,train_y,batch_size=self.batch_size,num_of_epochs=self.num_of_epochs,shuffle_data=True,utt_length=utt_length)
    else:
        self.tensorflow_models.train_feedforward_model(train_x, train_y, batch_size=self.batch_size, num_of_epochs=self.num_of_epochs, shuffle_data=self.shuffle_data)
def test_tensorflow_model(self):
    """Run the trained model over the test file list and write predictions.

    Predictions are denormalized with self.out_scaler and written to the
    files in self.gen_test_file_list by the model's predict().
    """
    #### load the data ####
    print('preparing test_x from input feature files...')
    test_x, test_flen = data_utils.read_test_data_from_file_list(self.inp_test_file_list, self.inp_dim)
    #### normalize the data ####
    data_utils.norm_data(test_x, self.inp_scaler)
    #### compute predictions ####
    if self.encoder_decoder:
        self.encoder_decoder_models.predict(test_x,self.out_scaler,self.gen_test_file_list)
    else:
        self.tensorflow_models.predict(test_x, self.out_scaler, self.gen_test_file_list, self.sequential_training)
def main_function(self):
### Implement each module ###
if self.NORMDATA:
self.normlize_data()
if self.TRAINMODEL:
self.train_tensorflow_model()
if self.TESTMODEL:
self.test_tensorflow_model()
if __name__=="__main__":
    # Entry point: expects exactly one argument, the Merlin config file path.
    if len(sys.argv) != 2:
        print('usage: python run_tensorflow_with_merlin_io.py [config file name]')
        sys.exit(1)
    # create a configuration instance
    # and get a short name for this instance
    cfg = configuration.configuration()
    config_file = sys.argv[1]
    config_file = os.path.abspath(config_file)
    cfg.configure(config_file)
    print("--- Job started ---")
    start_time = time.time()
    # main function
    tensorflow_instance = TensorflowClass(cfg)
    # except:
    #     print "inp stats file is %s"%cfg.inp_stats_file
    #     sys.exit(0)
    tensorflow_instance.main_function()
    # Report elapsed wall-clock time as minutes and seconds.
    (m, s) = divmod(int(time.time() - start_time), 60)
    print("--- Job completion time: %d min. %d sec ---" % (m, s))
    sys.exit(0)
| |
# interact with (your) Music Player Daemon
# (c) Wijnand 'tehmaze' Modderman - http://tehmaze.com
# BSD License
#
# CHANGELOG
# 2011-02-20
# * sMiLe - changed methods to be similar to the mpc commands
# 2011-02-13
# * sMiLe - added several new functions
# 2011-01-16
# * BHJTW - adapted to jsonbot
# 2008-10-30
# * fixed "Now playing" when having a password on MPD
# 2007-11-16
# * added watcher support
# * added formatting options ('song-status')
# * added more precision to duration calculation
# 2007-11-10
# * initial version
#
# REFERENCES
#
# The MPD wiki is a great resource for MPD information, especially:
# * http://mpd.wikia.com/wiki/MusicPlayerDaemonCommands
#
""" music player daemon control. """
__version__ = '2007111601'
import os, socket, time
from jsb.lib.commands import cmnds
from jsb.lib.datadir import getdatadir
from jsb.lib.examples import examples
from jsb.lib.fleet import fleet
from jsb.utils.pdod import Pdod
from jsb.lib.persistconfig import PersistConfig
from jsb.lib.threads import start_new_thread
# Plugin configuration, persisted across bot restarts.
cfg = PersistConfig()
cfg.define('server-host', '127.0.0.1')  # MPD server address
cfg.define('server-port', 6600)  # MPD control port
cfg.define('server-pass', '')  # optional MPD password ('' or 'off' means none)
cfg.define('socket-timeout', 15)  # seconds before a connection attempt gives up
cfg.define('watcher-interval', 10)  # seconds between now-playing polls
cfg.define('watcher-enabled', 0)  # set to 1 to announce song changes to channels
cfg.define('song-status', 'now playing: %(artist)s - %(title)s on "%(album)s" (duration: %(time)s)')
class MPDError(Exception): pass  # raised on MPD connection or protocol failure
class MPDDict(dict):
    """Dict that yields the placeholder '?' for missing keys.

    MPD responses omit fields (e.g. 'artist' on a stream), while the
    status format strings index fields unconditionally; the placeholder
    keeps '%(...)s' formatting from raising KeyError.
    """
    def __getitem__(self, item):
        # Fix: dict.has_key is Python 2 only (removed in Python 3);
        # dict.get with a default behaves identically on both.
        return dict.get(self, item, '?')
class MPDWatcher(Pdod):
    """Background watcher that announces song changes to subscribed channels.

    Subscriptions are persisted (via Pdod) as data[botname][channel] = True.
    """
    def __init__(self):
        Pdod.__init__(self, os.path.join(getdatadir() + os.sep + 'plugs' + os.sep + 'jsb.plugs.sockets.mpd', 'mpd'))
        self.running = False
        # id of the song last announced; -1 means nothing announced yet
        self.lastsong = -1
    def add(self, bot, ievent):
        # subscribe the channel the event came from
        if not self.has_key2(bot.name, ievent.channel):
            self.set(bot.name, ievent.channel, True)
            self.save()
    def remove(self, bot, ievent):
        # unsubscribe the channel the event came from
        if self.has_key2(bot.name, ievent.channel):
            del self.data[bot.name][ievent.channel]
            self.save()
    def start(self):
        # run the poll loop in its own thread
        self.running = True
        start_new_thread(self.watch, ())
    def stop(self):
        self.running = False
    def watch(self):
        """Poll MPD every watcher-interval seconds and announce song changes."""
        if not cfg.get('watcher-enabled'):
            raise MPDError('watcher not enabled, use "!%s-cfg watcher-enabled 1" to enable' % os.path.basename(__file__)[:-3])
        while self.running:
            if self.data:
                try:
                    status = MPDDict(mpd('currentsong'))
                    songid = int(status['id'])
                    # only announce when the song id actually changed
                    if songid != self.lastsong:
                        self.lastsong = songid
                        self.announce(status)
                except MPDError:
                    # server unreachable or protocol hiccup: retry next poll
                    pass
                except KeyError:
                    pass
            time.sleep(cfg.get('watcher-interval'))
    def announce(self, status):
        """Send the formatted now-playing line to every subscribed channel."""
        if not self.running or not cfg.get('watcher-enabled'):
            return
        status['time'] = mpd_duration(status['time'])
        song = cfg.get('song-status') % status
        for name in self.data.keys():
            bot = fleet.byname(name)
            if bot:
                for channel in self.data[name].keys():
                    bot.say(channel, song)
# module-level watcher instance shared by all command handlers
watcher = MPDWatcher()
# NOTE(review): re-constructing the watcher when it has no persisted data
# looks redundant — the second instance is built with identical arguments;
# confirm whether Pdod construction has a needed side effect here.
if not watcher.data:
    watcher = MPDWatcher()
def init():
    # plugin init hook: start the watcher thread only if enabled in config
    if cfg.get('watcher-enabled'):
        watcher.start()
    return 1
def shutdown():
    # plugin shutdown hook: stop the watcher thread if it is running
    if watcher.running:
        watcher.stop()
    return 1
def mpd(command):
    """Send one command to MPD and return its response.

    Opens a fresh TCP connection, optionally authenticates, sends the
    command followed by 'close', and parses the reply into a list of
    (key, value) tuples with keys lowercased.  Raises MPDError on
    connection or protocol errors.
    """
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(cfg.get('socket-timeout'))
        s.connect((cfg.get('server-host'), cfg.get('server-port')))
    except socket.error, e:
        raise MPDError, 'Failed to connect to server: %s' % str(e)
    m = s.makefile('r')
    # MPD greets with "OK MPD <version>"
    l = m.readline()
    if not l.startswith('OK MPD '):
        s.close()
        raise MPDError, 'Protocol error'
    if cfg.get('server-pass') and cfg.get('server-pass') != 'off':
        s.send('password %s\n' % cfg.get('server-pass'))
        l = m.readline()
        if not l.startswith('OK'):
            s.close()
            raise MPDError, 'Protocol error'
    s.send('%s\n' % command)
    s.send('close\n')
    d = []
    # read "key: value" lines until the terminating OK (or EOF)
    while True:
        l = m.readline().strip()
        if not l or l == 'OK':
            break
        if ': ' in l:
            l = l.split(': ', 1)
            l[0] = l[0].lower()
            d.append(tuple(l))
    s.close()
    return d
def mpd_duration(timespec):
    """Format a duration in seconds as compact weeks/days/hours/minutes/seconds.

    Units whose quotient is zero are simply omitted (e.g. 3601 seconds
    renders as '01h01s').  Returns 'unknown' when timespec cannot be
    converted to an integer.
    """
    try:
        timespec = int(timespec)
    except ValueError:
        return 'unknown'
    timestr = ''
    m = 60
    h = m * 60
    d = h * 24
    w = d * 7
    # Boundary fix: use >= so exact multiples roll over into the larger
    # unit (e.g. 60 -> '01m00s' instead of '60s', 3600 -> '01h00s'
    # instead of '60m00s').
    if timespec >= w:
        w, timespec = divmod(timespec, w)
        timestr = timestr + '%02dw' % w
    if timespec >= d:
        d, timespec = divmod(timespec, d)
        timestr = timestr + '%02dd' % d
    if timespec >= h:
        h, timespec = divmod(timespec, h)
        timestr = timestr + '%02dh' % h
    if timespec >= m:
        m, timespec = divmod(timespec, m)
        timestr = timestr + '%02dm' % m
    return timestr + '%02ds' % timespec
def handle_mpd(bot, ievent):
    """Reply with an mpc-style status report: current song, state,
    position, and the volume/repeat/random/single/consume flags."""
    try:
        result = []  # NOTE(review): unused — kept as-is
        song = MPDDict(mpd('currentsong'))
        status = MPDDict(mpd('status'))
        status['time'] = mpd_duration(status['time'])
        reply = ''
        if status['state'] == 'play':
            status['state'] = 'playing'
            reply += '%s: %s\n[%s] #%s/%s %s (%s)\n' % (song['name'], song['title'], status['state'], status['song'], status['playlistlength'], status['time'], '0')
        if status['state'] == 'stop': status['state'] = 'stopped'
        reply += 'volume: %s | repeat: %s | random: %s | single: %s | consume: %s' % (status['volume'], status['repeat'], status['random'], status['single'], status['consume'])
        ievent.reply(reply)
    except MPDError, e:
        ievent.reply(str(e))
def handle_mpd_outputs(bot, ievent):
    """List MPD audio outputs and whether each one is enabled."""
    #Output 1 (My ALSA Device) is enabled
    #[('outputid', '0'), ('outputname', 'My ALSA Device'), ('outputenabled', '1')]
    try:
        outputs = mpd('outputs')
        outputid = '?'
        outputname = '?'
        outputenabled = '?'
        result = []
        # outputs arrive as a flat stream of (key, value) pairs;
        # 'outputenabled' terminates one output record.
        for item in outputs:
            if item[0] == 'outputid': outputid = int(item[1])+1  # display 1-based, MPD is 0-based
            if item[0] == 'outputname': outputname = item[1]
            if item[0] == 'outputenabled':
                if item[1] == '1': outputenabled = 'enabled'
                else: outputenabled = 'disabled'
                result.append('Output %d (%s) is %s' % (outputid, outputname, outputenabled))
                # reset for the next record
                outputid = '?'
                outputname = '?'
                outputenabled = '?'
        ievent.reply("\n".join(result))
    except MPDError, e:
        ievent.reply(str(e))
def handle_mpd_enable(bot, ievent):
    """Enable an MPD output, addressed by its 1-based display number."""
    try:
        try: output = int(ievent.args[0])-1  # convert back to MPD's 0-based id
        except: ievent.missing('<output #>') ; return
        result = mpd('enableoutput %d' % output)
        ievent.reply(result)
    except MPDError, e:
        ievent.reply(str(e))
def handle_mpd_disable(bot, ievent):
    """Disable an MPD output, addressed by its 1-based display number."""
    try:
        try: output = int(ievent.args[0])-1  # convert back to MPD's 0-based id
        except: ievent.missing('<output #>') ; return
        result = mpd('disableoutput %d' % output)
        ievent.reply(result)
    except MPDError, e:
        ievent.reply(str(e))
def handle_mpd_playlist(bot, ievent):
try:
playlist = mpd('playlistinfo')
tmp = ''
result = []
for item in playlist:
if item[0] == 'file':
if not tmp == '': result.append(tmp)
tmp = item[1]
if item[0] == 'title': tmp = item[1]
if item[0] == 'name': tmp = '%s: %s' % (item[1], tmp)
ievent.reply("\n".join(result))
except MPDError, e:
ievent.reply(str(e))
def handle_mpd_lsplaylists(bot, ievent):
    """List the names of the stored playlists known to MPD."""
    try:
        playlists = mpd('lsinfo')
        result = []
        for item in playlists:
            # lsinfo mixes directories/files/playlists; keep playlists only
            if item[0] == 'playlist':
                result.append(item[1])
        ievent.reply("\n".join(result))
    except MPDError, e:
        ievent.reply(str(e))
def handle_mpd_playlist_manipulation(bot, ievent, command):
try:
if not ievent.args:
ievent.missing('<playlist>')
return
playlist = str(ievent.args[0])
result = mpd('%s %s' % (command, playlist))
ievent.reply('Playlist %s loaded.' % playlist)
except MPDError, e:
ievent.reply(str(e))
def handle_mpd_load(bot, ievent):
    # load a stored playlist into the current queue
    handle_mpd_playlist_manipulation(bot, ievent, 'load')
def handle_mpd_save(bot, ievent):
    # save the current queue as a stored playlist
    handle_mpd_playlist_manipulation(bot, ievent, 'save')
def handle_mpd_rm(bot, ievent):
    # remove a stored playlist
    handle_mpd_playlist_manipulation(bot, ievent, 'rm')
def handle_mpd_np(bot, ievent):
    """Reply with the now-playing line formatted per the 'song-status' config."""
    try:
        status = MPDDict(mpd('currentsong'))
        status['time'] = mpd_duration(status['time'])
        ievent.reply(cfg.get('song-status') % status)
    except MPDError, e:
        ievent.reply(str(e))
def handle_mpd_simple_seek(bot, ievent, command):
    """Run an argument-less MPD command, then reply with the full status."""
    try:
        mpd(command)
        #handle_mpd_np(bot, ievent)
        handle_mpd(bot, ievent)
    except MPDError, e:
        ievent.reply(str(e))
# thin per-command aliases over the generic helpers above
handle_mpd_next = lambda b,i: handle_mpd_simple_seek(b,i,'next')
handle_mpd_prev = lambda b,i: handle_mpd_simple_seek(b,i,'prev')
handle_mpd_play = lambda b,i: handle_mpd_simple_seek(b,i,'play')
handle_mpd_stop = lambda b,i: handle_mpd_simple_seek(b,i,'stop')
handle_mpd_pause = lambda b,i: handle_mpd_simple_seek(b,i,'pause')
handle_mpd_clear = lambda b,i: handle_mpd_simple_seek(b,i,'clear')
handle_mpd_crop = lambda b,i: handle_mpd_simple_seek(b,i,'crop')
handle_mpd_shuffle = lambda b,i: handle_mpd_simple_seek(b,i,'shuffle')
# boolean options toggle / set via handle_mpd_toggle_option
handle_mpd_repeat = lambda b,i: handle_mpd_toggle_option(b,i,'repeat')
handle_mpd_random = lambda b,i: handle_mpd_toggle_option(b,i,'random')
handle_mpd_single= lambda b,i: handle_mpd_toggle_option(b,i,'single')
handle_mpd_consume= lambda b,i: handle_mpd_toggle_option(b,i,'consume')
# numeric option set via handle_mpd_set_option
handle_mpd_crossfade= lambda b,i: handle_mpd_set_option(b,i,'crossfade')
def handle_mpd_find(bot, ievent):
type = 'title'
args = ievent.args
if args and args[0].lower() in ['title', 'album', 'artist']: type = args[0].lower() ; args = args[1:]
if not args: ievent.missing('[<type>] <what>') ; return
try:
find = mpd('search %s "%s"' % (type, ' '.join(args)))
show = []
for item, value in find:
if item == 'file': show.append(value)
if show: ievent.reply("results: ", show)
else: ievent.reply('no result')
except MPDError, e: ievent.reply(str(e))
def handle_mpd_add(bot, ievent):
    """Add a song (by path or URL) to the current playlist via 'addid'."""
    if not ievent.args:
        ievent.missing('<file>')
        return
    try:
        addid = MPDDict(mpd('addid "%s"' % ievent.rest))
        # 'addid' returns an 'id' field on success; has_key is Python 2 only
        if not addid.has_key('id'):
            ievent.reply('failed to load song "%s"' % ievent.rest)
        else:
            ievent.reply('added song with id "%s", use "mpd-jump %s" to start playback' % (addid['id'], addid['id']))
    except MPDError, e:
        ievent.reply(str(e))
def handle_mpd_del(bot, ievent):
if not ievent.args:
ievent.missing('<position>')
return
try:
result = mpd('delete %d' % int(ievent.args[0]))
ievent.reply(result)
except MPDError, e:
ievent.reply(str(e))
def handle_mpd_jump(bot, ievent):
    """Start playback at the given playlist song id, then show now-playing."""
    pos = 0
    try: pos = int(ievent.args[0])
    except: pass
    # NOTE(review): song id 0 is indistinguishable from "no argument" here
    if not pos:
        ievent.missing('<playlist id>')
        return
    try:
        mpd('playid %d' % pos)
        handle_mpd_np(bot, ievent)
    except MPDError, e:
        ievent.reply(str(e))
def handle_mpd_stats(bot, ievent):
    """Reply with server/database statistics, durations humanized."""
    try:
        status = MPDDict(mpd('stats'))
        status['total playtime'] = mpd_duration(status['playtime'])
        status['total database playtime'] = mpd_duration(status['db_playtime'])
        status['uptime'] = mpd_duration(status['uptime'])
        # drop the raw second counts just reformatted above, plus db_update
        del status['playtime']
        del status['db_playtime']
        del status['db_update']
        result = []
        for item in sorted(status.keys()):
            result.append('%s: %s' % (item, status[item]))
        ievent.reply(" | ".join(result))
    except MPDError, e:
        ievent.reply(str(e))
def handle_mpd_volume(bot, ievent):
volume = 0
try: volume = int(ievent.args[0])
except: pass
if not volume:
status = MPDDict(mpd('status'))
ievent.reply('Current volume: %s' % status['volume'])
return
try:
mpd('setvol %d' % volume)
ievent.reply('Volume set to %d' % volume)
except MPDError, e:
ievent.reply(str(s))
def handle_mpd_toggle_option(bot, ievent, option):
    """Set a boolean MPD option from the argument, or toggle it when absent."""
    if ievent.args:
        val = 'on'
        try: val = ievent.args[0]
        except: pass
        # anything other than '0'/'off' counts as on
        if val == '0' or val == 'off': val = '0'
        else: val = '1'
    else:
        # no argument: flip the current value reported by 'status'
        status = MPDDict(mpd('status'))
        if status[option] == '0': val = '1'
        else: val = '0'
    try:
        mpd('%s %s' % (option, val))
        handle_mpd(bot, ievent)
    except MPDError, e:
        ievent.reply(str(e))
def handle_mpd_set_option(bot, ievent, option):
try:
if ievent.args:
val = -1
try: val = int(ievent.args[0])
except: pass
if val > 0:
mpd('%s %s' % (option, val))
else:
ievent.reply('"off" is not 0 or positive integer' % ievent.args[0])
return
else:
status = MPDDict(mpd('status'))
ievent.reply("%s %s" % (option, status['xfade']))
except MPDError, e:
ievent.reply(str(e))
def handle_mpd_watch_start(bot, ievent):
    """Subscribe this channel to now-playing announcements."""
    if not cfg.get('watcher-enabled'):
        ievent.reply('watcher not enabled, use "!%s-cfg watcher-enabled 1" to enable and reload the plugin' % os.path.basename(__file__)[:-3])
        return
    watcher.add(bot, ievent)
    ievent.reply('ok')
def handle_mpd_watch_stop(bot, ievent):
    """Unsubscribe this channel from now-playing announcements."""
    if not cfg.get('watcher-enabled'):
        ievent.reply('watcher not enabled, use "!%s-cfg watcher-enabled 1" to enable and reload the plugin' % os.path.basename(__file__)[:-3])
        return
    watcher.remove(bot, ievent)
    ievent.reply('ok')
def handle_mpd_watch_list(bot, ievent):
    """List the subscribed channels, grouped per bot name."""
    if not cfg.get('watcher-enabled'):
        ievent.reply('watcher not enabled, use "!%s-cfg watcher-enabled 1" to enable and reload the plugin' % os.path.basename(__file__)[:-3])
        return
    result = []
    for name in sorted(watcher.data.keys()):
        if watcher.data[name]:
            result.append('on %s:' % name)
            for channel in sorted(watcher.data[name].keys()):
                result.append(channel)
    if result:
        ievent.reply(' '.join(result))
    else:
        ievent.reply('no watchers running')
# --- command and example registrations (permissions: 'MPD' for control
# commands, 'USER' for read-only ones; all handlers run threaded) ---
cmnds.add('mpd-jump', handle_mpd_jump, 'MPD', threaded=True)
examples.add('mpd-jump', 'jump to the specified playlist id', 'mpd-jump 666')
cmnds.add('mpd-watch-start', handle_mpd_watch_start, 'MPD', threaded=True)
cmnds.add('mpd-watch-stop', handle_mpd_watch_stop, 'MPD', threaded=True)
cmnds.add('mpd-watch-list', handle_mpd_watch_list, 'MPD', threaded=True)
#mpd Display status
cmnds.add('mpd', handle_mpd, 'USER', threaded=True)
examples.add('mpd', 'Display mpd status', 'mpd')
#mpd-add <file> Add a song to the current playlist
cmnds.add('mpd-add', handle_mpd_add, 'MPD', threaded=True)
cmnds.add('mpd-queue', handle_mpd_add, 'MPD', threaded=True)
examples.add('mpd-add', 'Add a song to the current playlist', 'mpd-add mp3/bigbeat/fatboy slim/fatboy slim - everybody needs a 303.mp3')
#mpd-crop Remove all but the currently playing song
cmnds.add('mpd-crop', handle_mpd_crop, 'MPD', threaded=True)
examples.add('mpd-crop', 'Remove all but the currently playing song', 'mpd-crop')
#mpd-current Show the currently playing song
cmnds.add('mpd-current', handle_mpd_np, 'USER', threaded=True)
cmnds.add('mpd-np', handle_mpd_np, 'USER', threaded=True)
examples.add('mpd-current', 'Show the currently playing song', 'mpd-current')
#mpd-del <position> Remove a song from the current playlist
cmnds.add('mpd-del', handle_mpd_del, 'MPD', threaded=True)
examples.add('mpd-del', 'Remove a song from the current playlist', 'mpd-del 1')
#mpd-play [<position>] Start playing at <position> (default: 1)
cmnds.add('mpd-play', handle_mpd_play, 'MPD', threaded=True)
examples.add('mpd-play', 'Start playing at <position> (default: 1)', 'mpd-play')
#mpd-next Play the next song in the current playlist
cmnds.add('mpd-next', handle_mpd_next, 'MPD', threaded=True)
examples.add('mpd-next', 'Play the next song in the current playlist', 'mpd-next')
#mpd-prev Play the previous song in the current playlist
cmnds.add('mpd-prev', handle_mpd_prev, 'MPD', threaded=True)
examples.add('mpd-prev', 'Play the previous song in the current playlist', 'mpd-prev')
#mpd-pause Pauses the currently playing song
cmnds.add('mpd-pause', handle_mpd_pause, 'MPD', threaded=True)
examples.add('mpd-pause', 'Pauses the currently playing song', 'mpd-pause')
#mpd-toggle Toggles Play/Pause, plays if stopped
cmnds.add('mpd-toggle', handle_mpd_pause, 'MPD', threaded=True)
examples.add('mpd-toggle', 'Toggles Play/Pause, plays if stopped', 'mpd-toggle')
#mpd-stop Stop the currently playing playlists
cmnds.add('mpd-stop', handle_mpd_stop, 'MPD', threaded=True)
examples.add('mpd-stop', 'Stop the currently playing playlists', 'mpd-stop')
# TODO mpd-seek [+-][HH:MM:SS]|<0-100>% Seeks to the specified position
# cmnds.add('mpd-seek', handle_mpd_seek, 'MPD')
#mpd-clear Clear the current playlist
cmnds.add('mpd-clear', handle_mpd_clear, 'MPD', threaded=True)
examples.add('mpd-clear', 'Clear the current playlist', 'mpd-clear')
#mpd-outputs Show the current outputs
cmnds.add('mpd-outputs', handle_mpd_outputs, 'MPD', threaded=True)
examples.add('mpd-outputs', 'Show the current outputs', 'mpd-outputs')
#mpd-enable <output #> Enable a output
cmnds.add('mpd-enable', handle_mpd_enable, 'MPD', threaded=True)
examples.add('mpd-enable', 'Enable a output', 'mpd-enable <output #>')
#mpd-disable <output #> Disable a output
cmnds.add('mpd-disable', handle_mpd_disable, 'MPD', threaded=True)
examples.add('mpd-disable', 'Disable a output', 'mpd-disable <output #>')
#mpd-shuffle Shuffle the current playlist
cmnds.add('mpd-shuffle', handle_mpd_shuffle, 'MPD', threaded=True)
examples.add('mpd-shuffle', 'Shuffle the current playlist', 'mpd-shuffle')
# TODO mpd move <from> <to> Move song in playlist
#cmnds.add('mpd-move', handle_mpd_move, 'MPD')
#examples.add('mpd-move', 'Move song in playlist', 'mpd-move <from> <to>')
#mpd-playlist Print the current playlist
cmnds.add('mpd-playlist', handle_mpd_playlist, 'USER', threaded=True)
examples.add('mpd-playlist', 'Print the current playlist', 'mpd-playlist')
# TODO mpd listall [<file>] List all songs in the music dir
#cmnds.add('mpd-listall', handle_mpd_listall, 'USER')
#examples.add('mpd-listall', 'List all songs in the music dir', 'mpd-listall [<file>]')
# TODO mpd ls [<directory>] List the contents of <directory>
#cmnds.add('mpd-ls', handle_mpd_ls, 'USER')
#examples.add('mpd-ls', 'List the contents of <directory>', 'mpd-ls [<directory>]')
#mpd-lsplaylists List currently available playlists
cmnds.add('mpd-lsplaylists', handle_mpd_lsplaylists, 'USER', threaded=True)
examples.add('mpd-lsplaylists', 'List currently available playlists', 'mpd-lsplaylists')
#mpd-load <file> Load <file> as a playlist
cmnds.add('mpd-load', handle_mpd_load, 'MPD', threaded=True)
examples.add('mpd-load', 'Load <file> as a playlist', 'mpd-load <file>')
#mpd-save <file> Save a playlist as <file>
cmnds.add('mpd-save', handle_mpd_save, 'MPD', threaded=True)
examples.add('mpd-save', 'Save a playlist as <file>', 'mpd-save <file>')
#mpd-rm <file> Remove a playlist
cmnds.add('mpd-rm', handle_mpd_rm, 'MPD', threaded=True)
examples.add('mpd-rm', 'Remove a playlist', 'mpd-rm <file>')
#mpd-volume [+-]<num> Set volume to <num> or adjusts by [+-]<num>
# TODO [+-]
cmnds.add('mpd-volume', handle_mpd_volume, 'MPD', threaded=True)
examples.add('mpd-volume', 'Set volume to <num> or adjusts by [+-]<num>', 'mpd-volume 42')
#mpd-repeat <on|off> Toggle repeat mode, or specify state
cmnds.add('mpd-repeat', handle_mpd_repeat, 'MPD', threaded=True)
# Bug fix: this example was registered under 'mpd-volume', overwriting the
# volume example; register it under its own command name.
examples.add('mpd-repeat', 'Toggle repeat mode, or specify state', 'mpd-repeat off')
# --- remaining registrations; commented entries are unimplemented TODOs ---
#mpd-random <on|off> Toggle random mode, or specify state
cmnds.add('mpd-random', handle_mpd_random, 'MPD', threaded=True)
examples.add('mpd-random', 'Toggle random mode, or specify state', 'mpd-random <on|off>')
#mpd-single <on|off> Toggle single mode, or specify state
cmnds.add('mpd-single', handle_mpd_single, 'MPD', threaded=True)
examples.add('mpd-single', 'Toggle single mode, or specify state', 'mpd-single <on|off>')
#mpd-consume <on|off> Toggle consume mode, or specify state
cmnds.add('mpd-consume', handle_mpd_consume, 'MPD', threaded=True)
examples.add('mpd-consume', 'Toggle consume mode, or specify state', 'mpd-consume <on|off>')
# TODO mpd search <type> <query> Search for a song
#cmnds.add('mpd-search', handle_mpd_search, 'MPD')
#examples.add('mpd-search', 'Search for a song', 'mpd-search <type> <query>')
#mpd-find <type> <query> Find a song (exact match)
cmnds.add('mpd-find', handle_mpd_find, 'MPD', threaded=True)
examples.add('mpd-find', 'Find a song (exact match)', 'mpd-find title love')
# TODO mpd-findadd <type> <query> Find songs and add them to the current playlist
# cmnds.add('mpd-findadd', handle_mpd_findadd, 'MPD')
#examples.add('mpd-findadd', 'Find songs and add them to the current playlist', 'mpd-findadd <type> <query>')
# TODO mpd-list <type> [<type> <query>] Show all tags of <type>
# cmnds.add('mpd-list', handle_mpd_list, 'MPD')
#examples.add('mpd-list', 'Show all tags of <type>', 'mpd-list <type> [<type> <query>]')
#mpd-crossfade [<seconds>] Set and display crossfade settings
cmnds.add('mpd-crossfade', handle_mpd_crossfade, 'MPD', threaded=True)
examples.add('mpd-crossfade', 'Set and display crossfade settings', 'mpd-crossfade 42')
#mpd-update [<path>] Scan music directory for updates
#cmnds.add('mpd-update', handle_mpd_update, 'MPD')
#mpd-sticker <uri> <get|set|list|del> <args..> Sticker management
#cmnds.add('mpd-sticker', handle_mpd_sticker, 'MPD')
#mpd-stats Display statistics about MPD
cmnds.add('mpd-stats', handle_mpd_stats, 'USER', threaded=True)
examples.add('mpd-stats', 'Display statistics about MPD', 'mpd-stats')
#mpd-version Report version of MPD
#cmnds.add('mpd-version', handle_mpd_version, 'USER')
#mpd-idle [events] Idle until an event occurs
#cmnds.add('mpd-idle', handle_mpd_idle, 'MPD')
#mpd-idleloop [events] Continuously idle until an event occurs
#cmnds.add('mpd-idleloop', handle_mpd_idleloop, 'MPD')
# TODO mpd-replaygain [off|track|ablum] Set or display the replay gain mode
# cmnds.add('mpd-replaygain', handle_mpd_replaygain, 'MPD')
#examples.add('mpd-replaygain', 'Set or display the replay gain mode', 'mpd-replaygain')
| |
# coding=utf-8
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import itertools
import re
from future.utils import text_type
from pants.base.exiter import PANTS_FAILED_EXIT_CODE, PANTS_SUCCEEDED_EXIT_CODE
from pants.engine.console import Console
from pants.engine.fs import Digest, FilesContent
from pants.engine.goal import Goal
from pants.engine.legacy.graph import HydratedTarget, HydratedTargets
from pants.engine.objects import Collection
from pants.engine.rules import console_rule, optionable_rule, rule
from pants.engine.selectors import Get
from pants.subsystem.subsystem import Subsystem
from pants.util.memo import memoized_method
from pants.util.objects import datatype, enum
class DetailLevel(enum(['none', 'summary', 'nonmatching', 'all'])):
  """How much detail about validation to emit to the console.

  none: Emit nothing.
  summary: Emit a summary only.
  nonmatching: Emit details for source files that failed to match at least one required pattern.
  all: Emit details for all source files.
  """
  pass
class Validate(Goal):
  """Goal that validates source files against configured regex patterns."""
  name = 'validate'

  @classmethod
  def register_options(cls, register):
    super(Validate, cls).register_options(register)
    register('--detail-level', type=DetailLevel, default=DetailLevel.nonmatching,
             help='How much detail to emit to the console.')
class SourceFileValidation(Subsystem):
  """Subsystem carrying the regex-matching config for source file validation."""
  options_scope = 'sourcefile-validation'

  @classmethod
  def register_options(cls, register):
    super(SourceFileValidation, cls).register_options(register)
    # Config schema is as follows:
    #
    # {
    #   'path_patterns': {
    #     'path_pattern1': {
    #       'pattern': <path regex pattern>,
    #       'inverted': True|False (defaults to False),
    #       'content_encoding': <encoding> (defaults to utf8)
    #     }
    #     ...
    #   },
    #   'content_patterns': {
    #     'content_pattern1': {
    #       'pattern': <content regex pattern>,
    #       'inverted': True|False (defaults to False)
    #     }
    #     ...
    #   },
    #   'required_matches': {
    #     'path_pattern1': [content_pattern1, content_pattern2],
    #     'path_pattern2': [content_pattern1, content_pattern3],
    #     ...
    #   }
    # }
    #
    # Meaning: if a file matches some path pattern, its content must match all the corresponding
    # content patterns.
    register('--config', type=dict, fromfile=True,
             # TODO: Replace "See documentation" with actual URL, once we have some.
             help='Source file regex matching config. See documentation for config schema.')

  @memoized_method
  def get_multi_matcher(self):
    # Cached so the (validated) matcher set is only built once per instance.
    return MultiMatcher(self.get_options().config)
class RegexMatchResult(datatype([
    ('path', text_type), ('matching', tuple), ('nonmatching', tuple)
])):
  """The result of running regex matches on a source file.

  matching/nonmatching are tuples of content pattern names that did / did
  not match the file's content.
  """

# Engine collection type wrapping per-file results.
RegexMatchResults = Collection.of(RegexMatchResult)
class Matcher(object):
  """Matches a single regex, optionally inverted.

  The regex is applied as a search, i.e. it may match anywhere in the string.
  Anchor with ^ to require a match at the beginning of the string, or combine
  ^ with the MULTILINE directive (?m) to anchor at the beginning of any line.
  See the tests for examples.
  """

  def __init__(self, pattern, inverted=False):
    self.compiled_regex = re.compile(pattern)
    self.inverted = inverted

  def matches(self, s):
    """Return True iff this (possibly inverted) pattern matches anywhere in s."""
    found = self.compiled_regex.search(s) is not None
    if self.inverted:
      return not found
    return found
class PathMatcher(Matcher):
  """Matches file paths against a single (possibly inverted) regex.

  Also records the encoding that the content of matching files is expected
  to be decoded with (defaults to 'utf8').
  """

  def __init__(self, pattern, inverted=False, content_encoding='utf8'):
    super(PathMatcher, self).__init__(pattern, inverted=inverted)
    # The expected encoding of the content of files whose paths match this pattern.
    self.content_encoding = content_encoding
class ContentMatcher(Matcher):
  """A matcher for matching file content.

  Behaviorally identical to Matcher; the subclass exists to distinguish
  content patterns from path patterns.
  """
  pass
class MultiMatcher(object):
  """Applies the configured path/content regex constraints to files."""

  def __init__(self, config):
    """Class to check multiple regex matching on files.

    :param dict config: Regex matching config (see above).
    :raises ValueError: if required_matches references unknown pattern names
      or maps a path pattern to a non-list/tuple value.
    """
    path_patterns = config.get('path_patterns', {})
    content_patterns = config.get('content_patterns', {})
    required_matches = config.get('required_matches', {})
    # Validate the pattern names mentioned in required_matches.
    path_patterns_used = set()
    content_patterns_used = set()
    for k, v in required_matches.items():
      path_patterns_used.add(k)
      if not isinstance(v, (tuple, list)):
        raise ValueError('Value for path pattern {} in required_matches must be tuple of '
                         'content pattern names, but was {}'.format(k, v))
      content_patterns_used.update(v)
    unknown_path_patterns = path_patterns_used.difference(path_patterns.keys())
    if unknown_path_patterns:
      raise ValueError('required_matches uses unknown path pattern names: '
                       '{}'.format(', '.join(sorted(unknown_path_patterns))))
    unknown_content_patterns = content_patterns_used.difference(content_patterns.keys())
    if unknown_content_patterns:
      raise ValueError('required_matches uses unknown content pattern names: '
                       '{}'.format(', '.join(sorted(unknown_content_patterns))))
    # Each entry's dict of options is splatted into the matcher constructor.
    self._path_matchers = {k: PathMatcher(**v) for k, v in path_patterns.items()}
    self._content_matchers = {k: ContentMatcher(**v) for k, v in content_patterns.items()}
    self._required_matches = required_matches

  def check_source_file(self, path, content):
    # Run every content pattern required for this path over the file content.
    content_pattern_names, encoding = self.get_applicable_content_pattern_names(path)
    matching, nonmatching = self.check_content(content_pattern_names, content, encoding)
    return RegexMatchResult(path, matching, nonmatching)

  def check_content(self, content_pattern_names, content, encoding):
    """Check which of the named patterns matches the given content.

    Returns a pair (matching, nonmatching), in which each element is a tuple of pattern names.

    :param iterable content_pattern_names: names of content patterns to check.
    :param bytes content: the content to check.
    :param str encoding: the expected encoding of content.
    """
    if not content_pattern_names or not encoding:
      return (), ()
    matching = []
    nonmatching = []
    for content_pattern_name in content_pattern_names:
      if self._content_matchers[content_pattern_name].matches(content.decode(encoding)):
        matching.append(content_pattern_name)
      else:
        nonmatching.append(content_pattern_name)
    return tuple(matching), tuple(nonmatching)

  def get_applicable_content_pattern_names(self, path):
    """Return the content patterns applicable to a given path.

    Returns a tuple (applicable_content_pattern_names, content_encoding).

    If path matches no path patterns, the returned content_encoding will be None (and
    applicable_content_pattern_names will be empty).

    :raises ValueError: if the path matches patterns that declare different
      content encodings.
    """
    encodings = set()
    applicable_content_pattern_names = set()
    for path_pattern_name, content_pattern_names in self._required_matches.items():
      m = self._path_matchers[path_pattern_name]
      if m.matches(path):
        encodings.add(m.content_encoding)
        applicable_content_pattern_names.update(content_pattern_names)
    if len(encodings) > 1:
      raise ValueError('Path matched patterns with multiple content encodings ({}): {}'.format(
        ', '.join(sorted(encodings)), path
      ))
    content_encoding = next(iter(encodings)) if encodings else None
    return applicable_content_pattern_names, content_encoding
# TODO: Switch this to `lint` once we figure out a good way for v1 tasks and v2 rules
# to share goal names.
@console_rule(Validate, [Console, HydratedTargets, Validate.Options])
def validate(console, hydrated_targets, validate_options):
  """Console rule: report per-file regex validation results and set the
  exit code to failure when any file misses a required pattern."""
  # Fan out one RegexMatchResults request per hydrated target.
  per_tgt_rmrs = yield [Get(RegexMatchResults, HydratedTarget, ht) for ht in hydrated_targets]
  regex_match_results = list(itertools.chain(*per_tgt_rmrs))
  detail_level = validate_options.values.detail_level
  regex_match_results = sorted(regex_match_results, key=lambda x: x.path)
  num_matched_all = 0
  num_nonmatched_some = 0
  for rmr in regex_match_results:
    # Skip files to which no content patterns applied at all.
    if not rmr.matching and not rmr.nonmatching:
      continue
    if rmr.nonmatching:
      icon = 'X'
      num_nonmatched_some += 1
    else:
      icon = 'V'
      num_matched_all += 1
    matched_msg = ' Matched: {}'.format(','.join(rmr.matching)) if rmr.matching else ''
    nonmatched_msg = (" Didn't match: {}".format(','.join(rmr.nonmatching))
                      if rmr.nonmatching else '')
    # Per-file lines only at 'all', or at 'nonmatching' for failing files.
    if (detail_level == DetailLevel.all or
        (detail_level == DetailLevel.nonmatching and nonmatched_msg)):
      console.print_stdout("{} {}:{}{}".format(icon, rmr.path, matched_msg, nonmatched_msg))
  if detail_level != DetailLevel.none:
    console.print_stdout('\n{} files matched all required patterns.'.format(num_matched_all))
    console.print_stdout('{} files failed to match at least one required pattern.'.format(
      num_nonmatched_some))
  if num_nonmatched_some:
    console.print_stderr('Files failed validation.')
    exit_code = PANTS_FAILED_EXIT_CODE
  else:
    exit_code = PANTS_SUCCEEDED_EXIT_CODE
  yield Validate(exit_code)
@rule(RegexMatchResults, [HydratedTarget, SourceFileValidation])
def match_regexes_for_one_target(hydrated_target, source_file_validation):
    """Rule: run the configured matchers over one target's source files.

    Targets without a `sources` field yield an empty RegexMatchResults.
    """
    multi_matcher = source_file_validation.get_multi_matcher()
    rmrs = []
    if hasattr(hydrated_target.adaptor, 'sources'):
        files_content = yield Get(FilesContent,
                                  Digest, hydrated_target.adaptor.sources.snapshot.directory_digest)
        for file_content in files_content:
            rmrs.append(multi_matcher.check_source_file(file_content.path, file_content.content))
    yield RegexMatchResults(rmrs)
def rules():
    """Return the engine rules exported by this module for registration."""
    exported = [
        validate,
        match_regexes_for_one_target,
        optionable_rule(SourceFileValidation),
    ]
    return exported
| |
"""
Makes logger
*Examples:* ::
>>> import common.diagnostic.pcsLogger
>>> maya_logger = common.diagnostic.pcsLogger.logger
>>> mobu_logger = common.diagnostic.pcsLogger.moBuLogger
*Author:*
* Jason.Parks, jason@continuityai.com, 1/8/14 5:04 PM
"""
from common.core import globalVariables as gv
import exceptions
import getpass
import logging.handlers
import os
import sys
# Remember, cannot import MayaCore because it imports this module
# Remember, cannot import MayaCore because it imports this module
# SMTP settings for the CRITICAL-level email handler built in pcs_logger();
# the sender address is '<username>' + FROMADDRESS.
MAILHOST = 'mail.myCompany.com'
FROMADDRESS = '@myCompany.com'
# email addresses
gameArtPipelineEmail = 'logger@gameartpipeline.com'
teamATechArtistEmail = 'techArtistA@myCompany.com'
teamBTechArtistEmail = 'techArtistB@myCompany.com'
toAddresses = [gameArtPipelineEmail]
#artTeam = parser_pcs.ParseSomething().userGet('myCompanyActiveTeam')
# NOTE(review): the config lookup above is disabled; artTeam is hard-coded.
artTeam = 'GreatGameA'
if artTeam == gv.teamA:
    toAddresses = [gameArtPipelineEmail, teamATechArtistEmail]
if artTeam == gv.teamB:
    toAddresses = [gameArtPipelineEmail, teamBTechArtistEmail]
# Logging levels: the standard `logging` values, plus our *DIALOG levels
# slotted between them (a *DIALOG level also pops a UI dialog).
CRITICAL = 50
FATAL = CRITICAL
# ours
ERRORDIALOG = 45
ERROR = 40
WARNING = 30
WARN = WARNING
# ours
INFODIALOG = 25
INFO = 20
# ours
DEBUGDIALOG = 15
DEBUG = 10
NOTSET = 0
class PCSlogging(logging.Logger):
""" subclass to wrap error method to return function to end script """
def errorDialog(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.info("Houston, we have a %s", "thorny problem", exc_info=1)
"""
print "Using PCSlogging subclass"
if self.isEnabledFor(INFODIALOG):
self._log(ERROR, msg, args, **kwargs)
title = 'logger.errorDialog'
if len(args) > 1:
title = args[1]
try:
from maya import cmds
result = cmds.confirmDialog(t=title, m=msg, b=['OK', 'Cancel'], db='OK', cb='Cancel', ds='Cancel')
if result == "Cancel":
print "*** Canceled ***"
sys.exit()
except RuntimeError:
import pyfbsdk
pyfbsdk.FBMessageBox(title, msg, "OK")
finally:
print "Not in Maya or MoBu?"
raise exceptions.BaseException(msg)
def error(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=1)
"""
print "Using PCSlogging subclass"
if self.isEnabledFor(ERROR):
self._log(ERROR, msg, args, **kwargs)
raise exceptions.BaseException(msg)
def infoDialog(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.info("Houston, we have a %s", "thorny problem", exc_info=1)
"""
print "Using PCSlogging subclass"
if self.isEnabledFor(INFODIALOG):
self._log(INFO, msg, args, **kwargs)
title = 'logger.infoDialog'
if len(args) > 1:
title = args[1]
try:
# noinspection PyUnresolvedReferences
import maya.cmds
result = maya.cmds.confirmDialog(t=title, m=msg, b=['OK', 'Cancel'], db='OK', cb='Cancel', ds='Cancel')
if result == "Cancel":
print "*** Canceled ***"
sys.exit()
except ImportError:
import pyfbsdk
pyfbsdk.FBMessageBox(title, msg, "OK")
finally:
print "Not in Maya or MoBu?"
def debugDialog(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
"""
print "Using PCSlogging subclass"
if self.isEnabledFor(DEBUGDIALOG):
self._log(DEBUG, msg, args, **kwargs)
title = 'logger.debugDialog'
if len(args) > 1:
title = args[1]
try:
from maya import cmds
result = cmds.confirmDialog(t=title, m=msg, b=['OK', 'Cancel'], db='OK', cb='Cancel', ds='Cancel')
if result == "Cancel":
print "*** Canceled ***"
sys.exit()
except RuntimeError:
import pyfbsdk
pyfbsdk.FBMessageBox(title, msg, "OK")
finally:
print "Not in Maya or MoBu?"
def pcs_logger(filePathName='%s/data/%s/%s.log' % (gv.logLocation, getpass.getuser(), getpass.getuser()), _logger='',
               name='pcsLogger', fresh=0):
    """
    SYNOPSIS: Creates nice logger. Will nuke FileHandler logger of similar
    type if filePathName is different from existing loggers
    INPUTS
    (string) filePathName: path and file name of FileHandler stream
    (logging.Logger) logger: python or pymel logger object
    (string) name: 'handle' to logger object
    (bool) fresh: 1 = tries to delete old log file
    RETURNS: (logging.Logger) logger object
    """
    # create dir if doesn't exist
    filePathName = filePathName.replace('\\', '/')
    filePath = filePathName[:filePathName.rfind('/')]
    if not os.path.exists(filePath):
        os.makedirs(filePath)
    # delete file if already exists
    if fresh:
        try:
            if os.path.exists(filePathName):
                os.remove(filePathName)
        except RuntimeError:
            # Log file still held open elsewhere; keep appending to it.
            print ("'%s' still opened, skipping deletion" % filePathName)
    # use our logger # Not needed w/ pymelLogger
    logging.setLoggerClass(PCSlogging)
    if not isinstance(_logger, logging.Logger):
        # No logger passed in (default is ''): create/fetch one by name.
        _logger = logging.getLogger(name)
    else:
        name = _logger.name
    # turn off Exceptions?
    logging.raiseExceptions = 1
    # create file format
    if sys.version_info[1] >= 5:
        # python 2.6 allows function name in logging
        formatter = logging.Formatter('%(asctime)s, %(module)s, %(funcName)s, %(lineno)d, %(levelname)s, %(message)s')
    else:
        formatter = logging.Formatter('%(asctime)s, %(lineno)d, %(levelname)s, %(message)s')
    # determine if handler has been made
    makeNewFileHandler = 1
    makeNewSMTPHandler = 1
    smtpHandlerFound = 0
    if len(_logger.handlers):
        for hndlr in map(lambda x: x, _logger.handlers):
            if hndlr.__class__ == logging.FileHandler:
                if not hndlr.stream.closed:
                    if hndlr.stream.name == filePathName:
                        # Already logging to the requested file; reuse it.
                        makeNewFileHandler = 0
                    else:
                        # Different file requested: drop the stale handler.
                        deleteHandler(_logger.handlers, hndlr)
                else:
                    deleteHandler(_logger.handlers, hndlr)
            # NOTE(review): the else below fires for ANY non-FileHandler
            # (e.g. a StreamHandler), not just SMTPHandlers, which then
            # suppresses creating the email handler - confirm intent.
            elif smtpHandlerFound and hndlr.__class__ == logging.handlers.SMTPHandler:
                deleteHandler(_logger.handlers, hndlr)
            else:
                smtpHandlerFound = 1
                makeNewSMTPHandler = 0
    # Make fileHandler and email handler
    if makeNewFileHandler:
        hdlr = logging.FileHandler(filePathName)
        #hdlr = logging.handlers.RotatingFileHandler(filePathName, 'a', 500, 2)
        hdlr.setFormatter(formatter)
        _logger.addHandler(hdlr)
    # Make email handler
    if makeNewSMTPHandler:
        userName = getpass.getuser()
        # Emails only CRITICAL records (see setLevel below).
        sHdlr = logging.handlers.SMTPHandler(MAILHOST, '%s%s' % (userName, FROMADDRESS), toAddresses,
                                             '%s: Critical from %s' % (name, userName))
        sHdlr.setLevel(logging.CRITICAL)
        _logger.addHandler(sHdlr)
    # set default level
    _logger.setLevel(logging.INFO)
    return _logger
def addMoBuHandler(_logger):
    """Attach a console StreamHandler (MotionBuilder-friendly format) to
    _logger and return the same logger."""
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(
        logging.Formatter('%(funcName)s, %(lineno)d, %(levelname)s, %(message)s'))
    _logger.addHandler(console_handler)
    return _logger
def deleteHandler(handlers, handler):
    """
    SYNOPSIS: removes handler from list of logger.handlers
    INPUTS
    (list) handlers: list of handlers
    (logger.handler) handler: handler to remove from list
    RETURNS: nothing

    Removes only the first occurrence; silently does nothing if the handler
    is not present (same contract as the original index-tracking loop).
    """
    try:
        handlers.remove(handler)
    except ValueError:
        # Handler not in the list: nothing to do.
        pass
# convenience instance
# Module-level loggers built at import time (this creates the log directory
# and file as a side effect of importing the module).
logger = pcs_logger()
moBuLogger = addMoBuHandler(pcs_logger(name='pcs_mobu_logger'))
if __name__ == '__main__':
    print 'run from Maya session'
else:
    print "common.diagnostic.pcsLogger imported"
| |
"""Benchmark from Laurent Vaucher.
Source: https://github.com/slowfrog/hexiom : hexiom2.py, level36.txt
(Main function tweaked by Armin Rigo.)
"""
from __future__ import division, print_function
import time
from io import StringIO
import cython
##################################
class Dir(object):
    """A hex-grid direction expressed as an (x, y) offset."""

    def __init__(self, x, y):
        self.x, self.y = x, y
# The six neighbor offsets of a hex cell in this coordinate scheme.
DIRS = [ Dir(1, 0),
         Dir(-1, 0),
         Dir(0, 1),
         Dir(0, -1),
         Dir(1, 1),
         Dir(-1, -1) ]

# Sentinel tile value meaning "cell is empty" (real tile values are 0-6).
EMPTY = 7
##################################
class Done(object):
    """Per-cell candidate sets ("domains") for every cell of the board.

    cells[i] is the list of values cell i may still take (0-6 or EMPTY);
    a cell is "done" once its list has exactly one entry.
    """
    # Strategy codes accepted by next_cell().
    MIN_CHOICE_STRATEGY = 0
    MAX_CHOICE_STRATEGY = 1
    HIGHEST_VALUE_STRATEGY = 2
    FIRST_STRATEGY = 3
    MAX_NEIGHBORS_STRATEGY = 4
    MIN_NEIGHBORS_STRATEGY = 5

    def __init__(self, count, empty=False):
        # empty=True skips allocating cells (clone() fills them itself).
        self.count = count
        self.cells = None if empty else [[0, 1, 2, 3, 4, 5, 6, EMPTY] for i in range(count)]

    def clone(self):
        """Return a copy with each cell's candidate list copied."""
        ret = Done(self.count, True)
        ret.cells = [self.cells[i][:] for i in range(self.count)]
        return ret

    def __getitem__(self, i):
        return self.cells[i]

    def set_done(self, i, v):
        """Fix cell i to the single value v."""
        self.cells[i] = [v]

    def already_done(self, i):
        """True when cell i has exactly one candidate left."""
        return len(self.cells[i]) == 1

    def remove(self, i, v):
        """Remove candidate v from cell i; return True if it was present."""
        if v in self.cells[i]:
            self.cells[i].remove(v)
            return True
        else:
            return False

    def remove_all(self, v):
        """Remove candidate v from every cell."""
        for i in range(self.count):
            self.remove(i, v)

    def remove_unfixed(self, v):
        """Remove v from all not-yet-done cells; True if anything changed."""
        changed = False
        for i in range(self.count):
            if not self.already_done(i):
                if self.remove(i, v):
                    changed = True
        return changed

    def filter_tiles(self, tiles):
        """Drop candidates for values that have no tiles available."""
        for v in range(8):
            if tiles[v] == 0:
                self.remove_all(v)

    @cython.locals(i=cython.int)
    def next_cell_min_choice(self):
        # Undone cell with the fewest (>1) remaining candidates; -1 if none.
        minlen = 10
        mini = -1
        for i in range(self.count):
            if 1 < len(self.cells[i]) < minlen:
                minlen = len(self.cells[i])
                mini = i
        return mini

    @cython.locals(i=cython.int)
    def next_cell_max_choice(self):
        # Cell with the most remaining candidates; -1 if all are done.
        maxlen = 1
        maxi = -1
        for i in range(self.count):
            if maxlen < len(self.cells[i]):
                maxlen = len(self.cells[i])
                maxi = i
        return maxi

    @cython.locals(i=cython.int)
    def next_cell_highest_value(self):
        # Undone cell whose largest non-EMPTY candidate is highest; -1 if none.
        maxval = -1
        maxi = -1
        for i in range(self.count):
            if (not self.already_done(i)):
                maxvali = max([k for k in self.cells[i] if k != EMPTY])
                if maxval < maxvali:
                    maxval = maxvali
                    maxi = i
        return maxi

    @cython.locals(i=cython.int)
    def next_cell_first(self):
        # First undone cell in index order; -1 if every cell is done.
        for i in range(self.count):
            if (not self.already_done(i)):
                return i
        return -1

    @cython.locals(i=cython.int)
    def next_cell_max_neighbors(self, pos):
        # Undone cell with the most filled (done, non-EMPTY) neighbors.
        maxn = -1
        maxi = -1
        for i in range(self.count):
            if not self.already_done(i):
                cells_around = pos.hex.get_by_id(i).links
                n = sum([1 if (self.already_done(nid) and (self[nid][0] != EMPTY)) else 0
                         for nid in cells_around])
                if n > maxn:
                    maxn = n
                    maxi = i
        return maxi

    @cython.locals(i=cython.int)
    def next_cell_min_neighbors(self, pos):
        # Undone cell with the fewest filled (done, non-EMPTY) neighbors.
        minn = 7
        mini = -1
        for i in range(self.count):
            if not self.already_done(i):
                cells_around = pos.hex.get_by_id(i).links
                n = sum([1 if (self.already_done(nid) and (self[nid][0] != EMPTY)) else 0
                         for nid in cells_around])
                if n < minn:
                    minn = n
                    mini = i
        return mini

    def next_cell(self, pos, strategy=HIGHEST_VALUE_STRATEGY):
        """Pick the next cell to branch on per `strategy`; -1 if none left."""
        if strategy == Done.HIGHEST_VALUE_STRATEGY:
            return self.next_cell_highest_value()
        elif strategy == Done.MIN_CHOICE_STRATEGY:
            return self.next_cell_min_choice()
        elif strategy == Done.MAX_CHOICE_STRATEGY:
            return self.next_cell_max_choice()
        elif strategy == Done.FIRST_STRATEGY:
            return self.next_cell_first()
        elif strategy == Done.MAX_NEIGHBORS_STRATEGY:
            return self.next_cell_max_neighbors(pos)
        elif strategy == Done.MIN_NEIGHBORS_STRATEGY:
            return self.next_cell_min_neighbors(pos)
        else:
            raise Exception("Wrong strategy: %d" % strategy)
##################################
class Node(object):
    """One hex cell: its (x, y) position, numeric id, and neighbor-id list."""

    def __init__(self, pos, id, links):
        self.links = links
        self.id = id
        self.pos = pos
##################################
class Hex(object):
    """Hexagonal board of side `size`; nodes indexed by id and by (x, y)."""

    @cython.locals(size=cython.int, id=cython.int, x=cython.int, y=cython.int)
    def __init__(self, size):
        self.size = size
        # Cell count of a hexagon with the given side length.
        self.count = 3 * size * (size - 1) + 1
        self.nodes_by_id = self.count * [None]
        self.nodes_by_pos = {}
        id = 0
        # Top half including the middle row: rows y=0..size-1, widening.
        for y in range(size):
            for x in range(size + y):
                pos = (x, y)
                node = Node(pos, id, [])
                self.nodes_by_pos[pos] = node
                self.nodes_by_id[node.id] = node
                id += 1
        # Bottom half: rows size..2*size-2, narrowing again.
        for y in range(1, size):
            for x in range(y, size * 2 - 1):
                ry = size + y - 1
                pos = (x, ry)
                node = Node(pos, id, [])
                self.nodes_by_pos[pos] = node
                self.nodes_by_id[node.id] = node
                id += 1

    @cython.locals(dir=Dir, x=cython.int, y=cython.int, nx=cython.int, ny=cython.int, node=Node)
    def link_nodes(self):
        """Fill each node's `links` with the ids of its on-board neighbors."""
        for node in self.nodes_by_id:
            (x, y) = node.pos
            for dir in DIRS:
                nx = x + dir.x
                ny = y + dir.y
                if self.contains_pos((nx, ny)):
                    node.links.append(self.nodes_by_pos[(nx, ny)].id)

    def contains_pos(self, pos):
        """True when (x, y) is a valid board position."""
        return pos in self.nodes_by_pos

    def get_by_pos(self, pos):
        return self.nodes_by_pos[pos]

    def get_by_id(self, id):
        return self.nodes_by_id[id]
##################################
class Pos(object):
    """A puzzle state: board (hex), remaining tile counts, cell domains."""

    def __init__(self, hex, tiles, done = None):
        self.hex = hex
        self.tiles = tiles
        if done is None:
            self.done = Done(hex.count)
        else:
            self.done = done

    def clone(self):
        """Copy the state: hex and tiles are shared, domains are copied."""
        return Pos(self.hex, self.tiles, self.done.clone())
##################################
@cython.locals(pos=Pos, i=cython.long, v=cython.int,
               nid=cython.int, num=cython.int,
               empties=cython.int, filled=cython.int,
               vmax=cython.int, vmin=cython.int, cell=list, left=cython.int[8])
def constraint_pass(pos, last_move=None):
    """One round of constraint propagation; returns True if any domain changed.

    With last_move given, only cells affected by that move are examined;
    otherwise the whole board is scanned.
    """
    changed = False
    left = pos.tiles[:]
    done = pos.done

    # Remove impossible values from free cells
    free_cells = (range(done.count) if last_move is None
                  else pos.hex.get_by_id(last_move).links)
    for i in free_cells:
        if not done.already_done(i):
            vmax = 0
            vmin = 0
            cells_around = pos.hex.get_by_id(i).links
            for nid in cells_around:
                if done.already_done(nid):
                    if done[nid][0] != EMPTY:
                        vmin += 1
                        vmax += 1
                else:
                    vmax += 1
            # A cell's number equals its count of filled neighbors, which
            # must fall within [vmin, vmax].
            for num in range(7):
                if (num < vmin) or (num > vmax):
                    if done.remove(i, num):
                        changed = True

    # Computes how many of each value is still free
    for cell in done.cells:
        if len(cell) == 1:
            left[cell[0]] -= 1

    for v in range(8):
        # If there is none, remove the possibility from all tiles
        if (pos.tiles[v] > 0) and (left[v] == 0):
            if done.remove_unfixed(v):
                changed = True
        else:
            possible = sum([(1 if v in cell else 0) for cell in done.cells])
            # If the number of possible cells for a value is exactly the number of available tiles
            # put a tile in each cell
            if pos.tiles[v] == possible:
                for i in range(done.count):
                    cell = done.cells[i]
                    if (not done.already_done(i)) and (v in cell):
                        done.set_done(i, v)
                        changed = True

    # Force empty or non-empty around filled cells
    filled_cells = (range(done.count) if last_move is None
                    else [last_move])
    for i in filled_cells:
        if done.already_done(i):
            num = done[i][0]
            empties = 0
            filled = 0
            unknown = []
            cells_around = pos.hex.get_by_id(i).links
            for nid in cells_around:
                if done.already_done(nid):
                    if done[nid][0] == EMPTY:
                        empties += 1
                    else:
                        filled += 1
                else:
                    unknown.append(nid)
            if len(unknown) > 0:
                # Quota met: every remaining unknown neighbor must be empty.
                if num == filled:
                    for u in unknown:
                        if EMPTY in done[u]:
                            done.set_done(u, EMPTY)
                            changed = True
                    #else:
                    #    raise Exception("Houston, we've got a problem")
                # Every unknown neighbor is needed: none may be empty.
                elif num == filled + len(unknown):
                    for u in unknown:
                        if done.remove(u, EMPTY):
                            changed = True

    return changed
# Move-ordering directions for find_moves().
ASCENDING = 1
DESCENDING = -1
def find_moves(pos, strategy, order):
    """List candidate (cell_id, value) moves for the next cell picked by
    `strategy`, ordered ascending or value-descending with EMPTY last."""
    done = pos.done
    cell_id = done.next_cell(pos, strategy)
    if cell_id < 0:
        return []
    candidates = done[cell_id]
    if order == ASCENDING:
        return [(cell_id, value) for value in candidates]
    # Try higher values first and EMPTY last
    moves = [(cell_id, value) for value in reversed(candidates) if value != EMPTY]
    if EMPTY in candidates:
        moves.append((cell_id, EMPTY))
    return moves
def play_move(pos, move):
    """Apply a (cell_id, value) move to pos by fixing that cell's value."""
    cell_id, value = move
    pos.done.set_done(cell_id, value)
@cython.locals(x=cython.int, y=cython.int, ry=cython.int, id=cython.int)
def print_pos(pos, output):
    """Pretty-print the board to `output`: digits for filled cells, '.' for
    empty cells, '?' for undecided ones."""
    hex = pos.hex
    done = pos.done
    size = hex.size
    # Top half (rows 0..size-1), indented to draw the hex shape.
    for y in range(size):
        print(u" " * (size - y - 1), end=u"", file=output)
        for x in range(size + y):
            pos2 = (x, y)
            id = hex.get_by_pos(pos2).id
            if done.already_done(id):
                c = str(done[id][0]) if done[id][0] != EMPTY else u"."
            else:
                c = u"?"
            print(u"%s " % c, end=u"", file=output)
        print(end=u"\n", file=output)
    # Bottom half (rows size..2*size-2).
    for y in range(1, size):
        print(u" " * y, end=u"", file=output)
        for x in range(y, size * 2 - 1):
            ry = size + y - 1
            pos2 = (x, ry)
            id = hex.get_by_pos(pos2).id
            if done.already_done(id):
                c = str(done[id][0]) if done[id][0] != EMPTY else (u".")
            else:
                c = u"?"
            print(u"%s " % c, end=u"", file=output)
        print(end=u"\n", file=output)
# Search outcome codes returned by solved() and solve_step().
OPEN = 0
SOLVED = 1
IMPOSSIBLE = -1
@cython.locals(i=cython.int, num=cython.int, nid=cython.int,
               vmin=cython.int, vmax=cython.int, tiles=cython.int[8])
def solved(pos, output, verbose=False):
    """Classify a position: SOLVED (also prints it), IMPOSSIBLE, or OPEN."""
    hex = pos.hex
    tiles = pos.tiles[:]
    done = pos.done
    exact = True
    all_done = True
    for i in range(hex.count):
        if len(done[i]) == 0:
            # Empty domain: no value can go in this cell.
            return IMPOSSIBLE
        elif done.already_done(i):
            num = done[i][0]
            tiles[num] -= 1
            if (tiles[num] < 0):
                # More copies of this value placed than tiles available.
                return IMPOSSIBLE
            vmax = 0
            vmin = 0
            if num != EMPTY:
                cells_around = hex.get_by_id(i).links
                for nid in cells_around:
                    if done.already_done(nid):
                        if done[nid][0] != EMPTY:
                            vmin += 1
                            vmax += 1
                    else:
                        vmax += 1
                if (num < vmin) or (num > vmax):
                    return IMPOSSIBLE
                if num != vmin:
                    # Satisfied only if some unknown neighbors end up filled.
                    exact = False
        else:
            all_done = False
    if (not all_done) or (not exact):
        return OPEN
    print_pos(pos, output)
    return SOLVED
@cython.locals(move=tuple)
def solve_step(prev, strategy, order, output, first=False):
    """Recursive backtracking step; returns SOLVED or IMPOSSIBLE.

    On the first call the position is cloned and constraint propagation is
    run to a fixed point before branching.
    """
    if first:
        pos = prev.clone()
        while constraint_pass(pos):
            pass
    else:
        pos = prev
    moves = find_moves(pos, strategy, order)
    if len(moves) == 0:
        # No branching left: decide whether this leaf is a solution.
        return solved(pos, output)
    else:
        for move in moves:
            #print("Trying (%d, %d)" % (move[0], move[1]))
            ret = OPEN
            new_pos = pos.clone()
            play_move(new_pos, move)
            #print_pos(new_pos)
            # Propagate consequences of the move to a fixed point.
            while constraint_pass(new_pos, move[0]):
                pass
            cur_status = solved(new_pos, output)
            if cur_status != OPEN:
                ret = cur_status
            else:
                ret = solve_step(new_pos, strategy, order, output)
            if ret == SOLVED:
                return SOLVED
    return IMPOSSIBLE
@cython.locals(tot=cython.int, tiles=cython.int[8])
def check_valid(pos):
    """Normalize pos.tiles in place (non-positive entries become 0) and
    verify the total tile count equals the board size; raises otherwise."""
    hex = pos.hex
    tiles = pos.tiles
    done = pos.done
    # fill missing entries in tiles
    tot = 0
    for i in range(8):
        if tiles[i] > 0:
            tot += tiles[i]
        else:
            tiles[i] = 0
    # check total
    if tot != hex.count:
        raise Exception("Invalid input. Expected %d tiles, got %d." % (hex.count, tot))
def solve(pos, strategy, order, output):
    """Validate the puzzle, then run the recursive search from scratch."""
    check_valid(pos)
    result = solve_step(pos, strategy, order, output, first=True)
    return result
# TODO Write an 'iterator' to go over all x,y positions
@cython.locals(x=cython.int, y=cython.int, p=cython.int, tiles=cython.int[8],
               size=cython.int, inctile=cython.int, linei=cython.int)
def read_file(file):
    """Parse a puzzle from the string `file` and return the initial Pos.

    Format: first line is the board side length; each following line holds
    two-character tokens per cell (' N', ' .', or '+N' for a locked tile),
    indented to draw the hex shape.
    """
    lines = [line.strip("\r\n") for line in file.splitlines()]
    size = int(lines[0])
    hex = Hex(size)
    linei = 1
    tiles = 8 * [0]
    done = Done(hex.count)
    # Top half of the hex (rows 0..size-1); slice off the shape indent.
    for y in range(size):
        line = lines[linei][size - y - 1:]
        p = 0
        for x in range(size + y):
            tile = line[p:p + 2]
            p += 2
            if tile[1] == ".":
                inctile = EMPTY
            else:
                inctile = int(tile)
            tiles[inctile] += 1
            # Look for locked tiles
            if tile[0] == "+":
                print("Adding locked tile: %d at pos %d, %d, id=%d" %
                      (inctile, x, y, hex.get_by_pos((x, y)).id))
                done.set_done(hex.get_by_pos((x, y)).id, inctile)
        linei += 1
    # Bottom half of the hex (rows size..2*size-2).
    for y in range(1, size):
        ry = size - 1 + y
        line = lines[linei][y:]
        p = 0
        for x in range(y, size * 2 - 1):
            tile = line[p:p + 2]
            p += 2
            if tile[1] == ".":
                inctile = EMPTY
            else:
                inctile = int(tile)
            tiles[inctile] += 1
            # Look for locked tiles
            if tile[0] == "+":
                print("Adding locked tile: %d at pos %d, %d, id=%d" %
                      (inctile, x, ry, hex.get_by_pos((x, ry)).id))
                done.set_done(hex.get_by_pos((x, ry)).id, inctile)
        linei += 1
    hex.link_nodes()
    done.filter_tiles(tiles)
    return Pos(hex, tiles, done)
def solve_file(file, strategy, order, output):
    """Parse a puzzle from the string `file` and solve it, writing any
    solution to `output`."""
    parsed = read_file(file)
    solve(parsed, strategy, order, output)
def run_level36():
    """Solve the built-in 'level 36' puzzle and check the known answer.

    Raises AssertionError if the solver's output differs from `expected`.
    """
    # Board text: indentation draws the hex shape and is consumed by
    # read_file()'s per-row slicing.
    f = """\
4
    2 1 1 2
   3 3 3 . .
  2 3 3 . 4 .
 . 2 . 2 4 3 2
  2 2 . . . 2
   4 3 4 . .
    3 2 3 3
"""
    order = DESCENDING
    strategy = Done.FIRST_STRATEGY
    output = StringIO()
    solve_file(f, strategy, order, output)
    # print_pos() writes "%s " per cell, so expected lines end with a space.
    expected = """\
   3 4 3 2 
  3 4 4 . 3 
 2 . . 3 4 3 
2 . 1 . 3 . 2 
 3 3 . 2 . 2 
  3 . 2 . 2 
   2 2 . 1 
"""
    if output.getvalue() != expected:
        raise AssertionError("got a wrong answer:\n%s" % output.getvalue())
def main(n):
    """Time n runs of run_level36() and return the list of per-run durations
    in seconds."""
    timings = []
    for _ in range(n):
        started = time.time()
        run_level36()
        timings.append(time.time() - started)
    return timings
# Benchmark-runner entry point (uses the benchmark suite's `util` helpers).
if __name__ == "__main__":
    import util, optparse
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description="Test the performance of the hexiom2 benchmark")
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()
    util.run_benchmark(options, options.num_runs, main)
| |
import re
from datetime import datetime
from tower import ugettext_lazy as _
# Add-on and File statuses.
STATUS_NULL = 0
STATUS_UNREVIEWED = 1
STATUS_PENDING = 2
STATUS_NOMINATED = 3
STATUS_PUBLIC = 4
STATUS_DISABLED = 5
_STATUS_LISTED = 6  # Deprecated. See bug 616242
STATUS_BETA = 7
STATUS_LITE = 8
STATUS_LITE_AND_NOMINATED = 9
STATUS_PURGATORY = 10  # A temporary home; bug 614686
STATUS_DELETED = 11
STATUS_REJECTED = 12  # This applies only to apps (for now)
STATUS_PUBLIC_WAITING = 13  # bug 740967
STATUS_REVIEW_PENDING = 14  # Themes queue, reviewed, needs further action.
STATUS_BLOCKED = 15

# Localized, human-readable labels for each status.
# NOTE(review): _STATUS_LISTED and STATUS_PUBLIC_WAITING have no entries
# here - confirm that is intentional.
STATUS_CHOICES = {
    STATUS_NULL: _(u'Incomplete'),
    STATUS_UNREVIEWED: _(u'Awaiting Preliminary Review'),
    STATUS_PENDING: _(u'Pending approval'),
    STATUS_NOMINATED: _(u'Awaiting Full Review'),
    STATUS_PUBLIC: _(u'Fully Reviewed'),
    STATUS_DISABLED: _(u'Disabled by Mozilla'),
    STATUS_BETA: _(u'Beta'),
    STATUS_LITE: _(u'Preliminarily Reviewed'),
    STATUS_LITE_AND_NOMINATED: _(
        u'Preliminarily Reviewed and Awaiting Full Review'),
    STATUS_PURGATORY: _(u'Pending a review choice'),
    STATUS_DELETED: _(u'Deleted'),
    STATUS_REJECTED: _(u'Rejected'),
    # Approved, but the developer would like to put it public when they want.
    # They need to go to the marketplace and actually make it public.
    STATUS_REVIEW_PENDING: _(u'Flagged for further review'),
    STATUS_BLOCKED: _(u'Blocked'),
}

# We need to expose nice values that aren't localisable.
STATUS_CHOICES_API = {
    STATUS_NULL: 'incomplete',
    STATUS_UNREVIEWED: 'unreviewed',
    STATUS_PENDING: 'pending',
    STATUS_NOMINATED: 'nominated',
    STATUS_PUBLIC: 'public',
    STATUS_DISABLED: 'disabled',
    STATUS_BETA: 'beta',
    STATUS_LITE: 'lite',
    STATUS_LITE_AND_NOMINATED: 'lite-nominated',
    STATUS_PURGATORY: 'purgatory',
    STATUS_DELETED: 'deleted',
    STATUS_REJECTED: 'rejected',
    STATUS_REVIEW_PENDING: 'review-pending',
    STATUS_BLOCKED: 'blocked',
}

# Reverse mapping of STATUS_CHOICES_API: API slug -> status id.
STATUS_CHOICES_API_LOOKUP = {
    'incomplete': STATUS_NULL,
    'unreviewed': STATUS_UNREVIEWED,
    'pending': STATUS_PENDING,
    'nominated': STATUS_NOMINATED,
    'public': STATUS_PUBLIC,
    'disabled': STATUS_DISABLED,
    'beta': STATUS_BETA,
    'lite': STATUS_LITE,
    'lite-nominated': STATUS_LITE_AND_NOMINATED,
    'purgatory': STATUS_PURGATORY,
    'deleted': STATUS_DELETED,
    'rejected': STATUS_REJECTED,
    'review-pending': STATUS_REVIEW_PENDING,
    'blocked': STATUS_BLOCKED,
}

PUBLIC_IMMEDIATELY = None
# Our MySQL does not store microseconds.
PUBLIC_WAIT = datetime.max.replace(microsecond=0)

# Status groupings used throughout the codebase.
REVIEWED_STATUSES = (STATUS_LITE, STATUS_LITE_AND_NOMINATED, STATUS_PUBLIC)
UNREVIEWED_STATUSES = (STATUS_UNREVIEWED, STATUS_PENDING, STATUS_NOMINATED,
                       STATUS_PURGATORY)
VALID_STATUSES = (STATUS_UNREVIEWED, STATUS_PENDING, STATUS_NOMINATED,
                  STATUS_PUBLIC, STATUS_BETA, STATUS_LITE,
                  STATUS_LITE_AND_NOMINATED, STATUS_PURGATORY)
# We don't show addons/versions with UNREVIEWED_STATUS in public.
LISTED_STATUSES = tuple(st for st in VALID_STATUSES if st != STATUS_PENDING)

# An add-on in one of these statuses is awaiting a review.
STATUS_UNDER_REVIEW = (STATUS_UNREVIEWED, STATUS_NOMINATED,
                       STATUS_LITE_AND_NOMINATED)

LITE_STATUSES = (STATUS_LITE, STATUS_LITE_AND_NOMINATED)

MIRROR_STATUSES = (STATUS_PUBLIC, STATUS_BETA,
                   STATUS_LITE, STATUS_LITE_AND_NOMINATED)

# An add-on in one of these statuses can become premium.
PREMIUM_STATUSES = (STATUS_NULL,) + STATUS_UNDER_REVIEW

# Types of administrative review queues for an add-on:
ADMIN_REVIEW_FULL = 1
ADMIN_REVIEW_PRELIM = 2

ADMIN_REVIEW_TYPES = {
    ADMIN_REVIEW_FULL: _(u'Full'),
    ADMIN_REVIEW_PRELIM: _(u'Preliminary'),
}
# Add-on author roles.
AUTHOR_ROLE_VIEWER = 1
AUTHOR_ROLE_DEV = 4
AUTHOR_ROLE_OWNER = 5
AUTHOR_ROLE_SUPPORT = 6

# Role choices for form/display use (role id, localized label).
AUTHOR_CHOICES = (
    (AUTHOR_ROLE_OWNER, _(u'Owner')),
    (AUTHOR_ROLE_DEV, _(u'Developer')),
    (AUTHOR_ROLE_VIEWER, _(u'Viewer')),
    (AUTHOR_ROLE_SUPPORT, _(u'Support')),
)

# Addon types
ADDON_ANY = 0
ADDON_EXTENSION = 1
ADDON_THEME = 2
ADDON_DICT = 3
ADDON_SEARCH = 4
ADDON_LPAPP = 5
ADDON_LPADDON = 6
ADDON_PLUGIN = 7
ADDON_API = 8  # not actually a type but used to identify extensions + themes
ADDON_PERSONA = 9
ADDON_WEBAPP = 11  # Calling this ADDON_* is gross but we've gotta ship code.

# Addon type groupings.
GROUP_TYPE_ADDON = [ADDON_EXTENSION, ADDON_DICT, ADDON_SEARCH, ADDON_LPAPP,
                    ADDON_LPADDON, ADDON_PLUGIN, ADDON_API]
GROUP_TYPE_THEME = [ADDON_THEME, ADDON_PERSONA]

# Singular
ADDON_TYPE = {
    ADDON_ANY: _(u'Any'),
    ADDON_EXTENSION: _(u'Extension'),
    ADDON_THEME: _(u'Complete Theme'),
    ADDON_DICT: _(u'Dictionary'),
    ADDON_SEARCH: _(u'Search Engine'),
    ADDON_PLUGIN: _(u'Plugin'),
    ADDON_LPAPP: _(u'Language Pack (Application)'),
    ADDON_PERSONA: _(u'Theme'),
}

# Plural
ADDON_TYPES = {
    ADDON_ANY: _(u'Any'),
    ADDON_EXTENSION: _(u'Extensions'),
    ADDON_THEME: _(u'Complete Themes'),
    ADDON_DICT: _(u'Dictionaries'),
    ADDON_SEARCH: _(u'Search Tools'),
    ADDON_PLUGIN: _(u'Plugins'),
    ADDON_LPAPP: _(u'Language Packs (Application)'),
    ADDON_PERSONA: _(u'Themes'),
}

# Searchable Add-on Types
ADDON_SEARCH_TYPES = [
    ADDON_ANY,
    ADDON_EXTENSION,
    ADDON_THEME,
    ADDON_DICT,
    ADDON_SEARCH,
    ADDON_LPAPP,
    ADDON_PERSONA,
]

ADDON_ADMIN_SEARCH_TYPES = ADDON_SEARCH_TYPES + [ADDON_PLUGIN]

# Icons
ADDON_ICONS = {
    ADDON_ANY: 'default-addon.png',
    ADDON_THEME: 'default-theme.png',
}

# We use these slugs in browse page urls.
ADDON_SLUGS = {
    ADDON_EXTENSION: 'extensions',
    ADDON_THEME: 'themes',
    ADDON_DICT: 'language-tools',
    ADDON_LPAPP: 'language-tools',
    ADDON_PERSONA: 'personas',
    ADDON_SEARCH: 'search-tools',
}

# These are used in the update API.
ADDON_SLUGS_UPDATE = {
    ADDON_EXTENSION: 'extension',
    ADDON_THEME: 'theme',
    ADDON_DICT: 'extension',
    ADDON_SEARCH: 'search',
    ADDON_LPAPP: 'item',
    ADDON_LPADDON: 'extension',
    ADDON_PERSONA: 'background-theme',
    ADDON_PLUGIN: 'plugin',
}

# A slug to ID map for the search API. Included are all ADDON_TYPES that are
# found in ADDON_SEARCH_TYPES.
ADDON_SEARCH_SLUGS = {
    'any': ADDON_ANY,
    'extension': ADDON_EXTENSION,
    'theme': ADDON_THEME,
    'dictionary': ADDON_DICT,
    'search': ADDON_SEARCH,
    'language': ADDON_LPAPP,
    'persona': ADDON_PERSONA,
}
# Add-on payment models.
ADDON_FREE = 0
ADDON_PREMIUM = 1
ADDON_PREMIUM_INAPP = 2
ADDON_FREE_INAPP = 3
# The addon will have payments, but they aren't using our payment system.
ADDON_OTHER_INAPP = 4

# Localized labels for each payment model.
ADDON_PREMIUM_TYPES = {
    ADDON_FREE: _('Free'),
    ADDON_PREMIUM: _('Premium'),
    ADDON_PREMIUM_INAPP: _('Premium with in-app payments'),
    ADDON_FREE_INAPP: _('Free with in-app payments'),
    ADDON_OTHER_INAPP: _("I'll use my own system for in-app payments")
}

# Non-locale versions for the API.
ADDON_PREMIUM_API = {
    ADDON_FREE: 'free',
    ADDON_PREMIUM: 'premium',
    ADDON_PREMIUM_INAPP: 'premium-inapp',
    ADDON_FREE_INAPP: 'free-inapp',
    ADDON_OTHER_INAPP: 'other',
}
# Reverse map: API slug -> premium type id.
ADDON_PREMIUM_API_LOOKUP = dict((v, k) for k, v in ADDON_PREMIUM_API.items())

# Apps that require some sort of payment prior to installing.
ADDON_PREMIUMS = (ADDON_PREMIUM, ADDON_PREMIUM_INAPP)
# Apps that do *not* require a payment prior to installing.
ADDON_FREES = (ADDON_FREE, ADDON_FREE_INAPP, ADDON_OTHER_INAPP)
ADDON_INAPPS = (ADDON_PREMIUM_INAPP, ADDON_FREE_INAPP)
ADDON_BECOME_PREMIUM = (ADDON_EXTENSION, ADDON_THEME, ADDON_DICT, ADDON_LPAPP)
ADDON_HAS_PAYMENTS = (ADDON_FREE_INAPP, ADDON_PREMIUM, ADDON_PREMIUM_INAPP)

# Edit addon information
MAX_TAGS = 20
MIN_TAG_LENGTH = 2
MAX_CATEGORIES = 2

# Icon upload sizes
ADDON_ICON_SIZES = [32, 48, 64, 128, 256, 512]

# Preview upload sizes [thumb, full]
ADDON_PREVIEW_SIZES = [(200, 150), (700, 525)]

# Persona image sizes [preview, full]
PERSONA_IMAGE_SIZES = {
    'header': [(680, 100), (3000, 200)],
    'footer': [None, (3000, 100)],
    'icon': [None, (32, 32)],
}

# Accepted image MIME-types
IMG_TYPES = ('image/png', 'image/jpeg', 'image/jpg')
VIDEO_TYPES = ('video/webm',)

# These types don't maintain app compatibility in the db. Instead, we look at
# APP.types and APP_TYPE_SUPPORT to figure out where they are compatible.
NO_COMPAT = (ADDON_SEARCH, ADDON_PERSONA)
HAS_COMPAT = dict((t, t not in NO_COMPAT) for t in ADDON_TYPES)

# Contributions
CONTRIB_NONE = 0
CONTRIB_PASSIVE = 1
CONTRIB_AFTER = 2
CONTRIB_ROADBLOCK = 3

# When/how to ask users for a contribution (id, localized prompt).
CONTRIB_CHOICES = (
    (CONTRIB_PASSIVE,
     _(u"Only ask on this add-on's page and developer profile")),
    (CONTRIB_AFTER, _(u"Ask after users start downloading this add-on")),
    (CONTRIB_ROADBLOCK, _(u"Ask before users can download this add-on")),
)

# Personas
PERSONAS_ADDON_ID = 10900  # Add-on ID of the Personas Plus Add-on
PERSONAS_FIREFOX_MIN = '3.6'  # First Firefox version to support Personas
PERSONAS_THUNDERBIRD_MIN = '3.1'  # Ditto for Thunderbird

# Collections.
COLLECTION_NORMAL = 0
COLLECTION_SYNCHRONIZED = 1
COLLECTION_FEATURED = 2
COLLECTION_RECOMMENDED = 3
COLLECTION_FAVORITES = 4
COLLECTION_MOBILE = 5
COLLECTION_ANONYMOUS = 6

COLLECTIONS_NO_CONTRIB = (COLLECTION_SYNCHRONIZED, COLLECTION_FAVORITES)

COLLECTION_SPECIAL_SLUGS = {
    COLLECTION_MOBILE: 'mobile',
    COLLECTION_FAVORITES: 'favorites',
}

COLLECTION_CHOICES = {
    COLLECTION_NORMAL: 'Normal',
    COLLECTION_SYNCHRONIZED: 'Synchronized',
    COLLECTION_FEATURED: 'Featured',
    COLLECTION_RECOMMENDED: 'Generated Recommendations',
    COLLECTION_FAVORITES: 'Favorites',
    COLLECTION_MOBILE: 'Mobile',
    COLLECTION_ANONYMOUS: 'Anonymous',
}

COLLECTION_SEARCH_CHOICES = [
    COLLECTION_NORMAL,
    COLLECTION_FEATURED,
    COLLECTION_RECOMMENDED,
    COLLECTION_MOBILE,
    COLLECTION_ANONYMOUS,
]

# Collection author roles.
COLLECTION_ROLE_PUBLISHER = 0
COLLECTION_ROLE_ADMIN = 1

COLLECTION_AUTHOR_CHOICES = {
    COLLECTION_ROLE_PUBLISHER: 'Publisher',
    COLLECTION_ROLE_ADMIN: 'Admin',
}
# Contributions.
FOUNDATION_ORG = 1  # The charities.id of the Mozilla Foundation.

# Matches pre-release version suffixes, e.g. '1.0b3', '2.0alpha', '1.2rc'.
# Raw strings: '\d' in a non-raw literal is an invalid escape sequence
# (DeprecationWarning on Python 3.6+, SyntaxWarning later).
VERSION_BETA = re.compile(r'(a|alpha|b|beta|pre|rc)\d*$')
# Captures the final numeric component of a dotted version, e.g. '3.6' -> '6'.
VERSION_SEARCH = re.compile(r'\.(\d+)$')
# Editor Tools
EDITOR_VIEWING_INTERVAL = 8  # How often we ping for "who's watching?"

# Types of SiteEvent
SITE_EVENT_OTHER = 1
SITE_EVENT_EXCEPTION = 2
SITE_EVENT_RELEASE = 3
SITE_EVENT_CHANGE = 4

SITE_EVENT_CHOICES = {
    SITE_EVENT_OTHER: _('Other'),
    SITE_EVENT_EXCEPTION: _('Exception'),
    SITE_EVENT_RELEASE: _('Release'),
    SITE_EVENT_CHANGE: _('Change'),
}

# Types of Canned Responses for reviewer tools.
CANNED_RESPONSE_ADDON = 1
CANNED_RESPONSE_APP = 2
CANNED_RESPONSE_PERSONA = 3

CANNED_RESPONSE_CHOICES = {
    CANNED_RESPONSE_ADDON: _('Add-on'),
    CANNED_RESPONSE_APP: _('App'),
    CANNED_RESPONSE_PERSONA: _('Persona'),
}

# For use in urls.
# Regex fragments with named groups, spliced into URL patterns.
ADDON_ID = r"""(?P<addon_id>[^/<>"']+)"""
ADDON_UUID = r'(?P<uuid>[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12})'
APP_SLUG = r"""(?P<app_slug>[^/<>"']+)"""

# Reviewer Incentive Scores.
# Note: Don't change these since they're used as keys in the database.
REVIEWED_MANUAL = 0
REVIEWED_ADDON_FULL = 10
REVIEWED_ADDON_PRELIM = 11
REVIEWED_ADDON_UPDATE = 12
REVIEWED_DICT_FULL = 20
REVIEWED_DICT_PRELIM = 21
REVIEWED_DICT_UPDATE = 22
REVIEWED_LP_FULL = 30
REVIEWED_LP_PRELIM = 31
REVIEWED_LP_UPDATE = 32
REVIEWED_PERSONA = 40
# TODO: Leaving room for persona points based on queue.
REVIEWED_SEARCH_FULL = 50
REVIEWED_SEARCH_PRELIM = 51
REVIEWED_SEARCH_UPDATE = 52
REVIEWED_THEME_FULL = 60
REVIEWED_THEME_PRELIM = 61
REVIEWED_THEME_UPDATE = 62
REVIEWED_ADDON_REVIEW = 80
REVIEWED_APP_REVIEW = 81

# Localized labels for each reviewer-score event.
REVIEWED_CHOICES = {
    REVIEWED_MANUAL: _('Manual Reviewer Points'),
    REVIEWED_ADDON_FULL: _('Full Add-on Review'),
    REVIEWED_ADDON_PRELIM: _('Preliminary Add-on Review'),
    REVIEWED_ADDON_UPDATE: _('Updated Add-on Review'),
    REVIEWED_DICT_FULL: _('Full Dictionary Review'),
    REVIEWED_DICT_PRELIM: _('Preliminary Dictionary Review'),
    REVIEWED_DICT_UPDATE: _('Updated Dictionary Review'),
    REVIEWED_LP_FULL: _('Full Language Pack Review'),
    REVIEWED_LP_PRELIM: _('Preliminary Language Pack Review'),
    REVIEWED_LP_UPDATE: _('Updated Language Pack Review'),
    REVIEWED_PERSONA: _('Theme Review'),
    REVIEWED_SEARCH_FULL: _('Full Search Provider Review'),
    REVIEWED_SEARCH_PRELIM: _('Preliminary Search Provider Review'),
    REVIEWED_SEARCH_UPDATE: _('Updated Search Provider Review'),
    REVIEWED_THEME_FULL: _('Complete Theme Review'),
    REVIEWED_THEME_PRELIM: _('Preliminary Complete Theme Review'),
    REVIEWED_THEME_UPDATE: _('Updated Complete Theme Review'),
    REVIEWED_ADDON_REVIEW: _('Moderated Addon Review'),
    REVIEWED_APP_REVIEW: _('Moderated App Review'),
}

# Points awarded per reviewer-score event.
REVIEWED_SCORES = {
    REVIEWED_MANUAL: 0,
    REVIEWED_ADDON_FULL: 120,
    REVIEWED_ADDON_PRELIM: 60,
    REVIEWED_ADDON_UPDATE: 80,
    REVIEWED_DICT_FULL: 60,
    REVIEWED_DICT_PRELIM: 20,
    REVIEWED_DICT_UPDATE: 60,
    REVIEWED_LP_FULL: 60,
    REVIEWED_LP_PRELIM: 20,
    REVIEWED_LP_UPDATE: 60,
    REVIEWED_PERSONA: 5,
    REVIEWED_SEARCH_FULL: 30,
    REVIEWED_SEARCH_PRELIM: 10,
    REVIEWED_SEARCH_UPDATE: 30,
    REVIEWED_THEME_FULL: 80,
    REVIEWED_THEME_PRELIM: 40,
    REVIEWED_THEME_UPDATE: 80,
    REVIEWED_ADDON_REVIEW: 1,
    REVIEWED_APP_REVIEW: 1,
}

# Events that count toward AMO reviewer scores.
REVIEWED_AMO = (
    REVIEWED_ADDON_FULL,
    REVIEWED_ADDON_PRELIM,
    REVIEWED_ADDON_UPDATE,
    REVIEWED_DICT_FULL,
    REVIEWED_DICT_PRELIM,
    REVIEWED_DICT_UPDATE,
    REVIEWED_LP_FULL,
    REVIEWED_LP_PRELIM,
    REVIEWED_LP_UPDATE,
    REVIEWED_SEARCH_FULL,
    REVIEWED_SEARCH_PRELIM,
    REVIEWED_SEARCH_UPDATE,
    REVIEWED_THEME_FULL,
    REVIEWED_THEME_PRELIM,
    REVIEWED_THEME_UPDATE,
    REVIEWED_ADDON_REVIEW,
)

# Cumulative point thresholds for reviewer levels.
REVIEWED_LEVELS = [
    {'name': _('Level 1'), 'points': 2160},
    {'name': _('Level 2'), 'points': 4320},
    {'name': _('Level 3'), 'points': 8700},
    {'name': _('Level 4'), 'points': 21000},
    {'name': _('Level 5'), 'points': 45000},
    {'name': _('Level 6'), 'points': 96000},
]

# Login credential source. We'll also include the site source in that.
# All the old existing AMO users and anyone before we started tracking this.
LOGIN_SOURCE_UNKNOWN = 0
# Most likely everyone who signed up for the marketplace.
LOGIN_SOURCE_BROWSERID = 1
# Everyone who signed up for the marketplace using BrowserID.
LOGIN_SOURCE_MMO_BROWSERID = 2
# Everyone who signed up for AMO once it uses BrowserID.
LOGIN_SOURCE_AMO_BROWSERID = 3

# These are logins that use BrowserID.
LOGIN_SOURCE_BROWSERIDS = [LOGIN_SOURCE_BROWSERID, LOGIN_SOURCE_AMO_BROWSERID,
                           LOGIN_SOURCE_MMO_BROWSERID]
| |
# $Id$
#
# Copyright (C) 2007-2008 Greg Landrum
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
import re
#set up the logger:
import rdkit.RDLogger as logging
from rdkit import Chem
from rdkit.Chem import AllChem, Crippen, Descriptors, Lipinski
from rdkit.Dbase import DbModule
from rdkit.Dbase.DbConnection import DbConnect
logger = logging.logger()
logger.setLevel(logging.INFO)
def ProcessMol(mol, typeConversions, globalProps, nDone, nameProp='_Name', nameCol='compound_id',
               redraw=False, keepHs=False, skipProps=False, addComputedProps=False,
               skipSmiles=False, uniqNames=None, namesSeen=None):
  """Convert a single molecule into a registry row.

  Returns a list ``[name, (smiles,) molBinary, propDict]`` (the SMILES entry
  is present only when ``skipSmiles`` is false), or None when ``uniqNames``
  is set and the molecule's name was already seen.

  Side effects: ``globalProps`` is updated with the narrowest column type
  seen for each property, ``namesSeen`` accumulates names, and properties
  may be set on ``mol`` itself when ``addComputedProps`` is true.

  Raises ValueError for a null molecule.
  """
  if not mol:
    raise ValueError('no molecule')
  if keepHs:
    # Hs are kept, but the molecule still needs sanitization.
    Chem.SanitizeMol(mol)
  try:
    nm = mol.GetProp(nameProp)
  except KeyError:
    nm = None
  if not nm:
    # Fall back to a serial name based on the molecule's position.
    nm = f'Mol_{nDone}'
  if uniqNames and nm in namesSeen:
    logger.error(f'duplicate compound id ({nm}) encountered. second instance skipped.')
    return None
  # Guard against the documented default of namesSeen=None; the original
  # unconditionally called namesSeen.add(nm), which raised AttributeError
  # when the caller relied on the default.
  if namesSeen is not None:
    namesSeen.add(nm)
  row = [nm]
  pD = {}
  if not skipProps:
    if addComputedProps:
      # A handful of cheap descriptors, stored back as string properties
      # so they are registered like any other SD property below.
      nHD = Lipinski.NumHDonors(mol)
      mol.SetProp('DonorCount', str(nHD))
      nHA = Lipinski.NumHAcceptors(mol)
      mol.SetProp('AcceptorCount', str(nHA))
      nRot = Lipinski.NumRotatableBonds(mol)
      mol.SetProp('RotatableBondCount', str(nRot))
      MW = Descriptors.MolWt(mol)
      mol.SetProp('AMW', str(MW))
      logp = Crippen.MolLogP(mol)
      mol.SetProp('MolLogP', str(logp))
    pns = list(mol.GetPropNames())
    for pn in pns:
      if pn.lower() == nameCol.lower():
        continue
      pv = mol.GetProp(pn).strip()
      if pv.find('>') < 0 and pv.find('<') < 0:
        # Start from the widest type already seen for this property and
        # fall back (int -> float -> varchar) until conversion succeeds,
        # so a single non-numeric value demotes the whole column.
        colTyp = globalProps.get(pn, 2)
        while colTyp > 0:
          try:
            _ = typeConversions[colTyp][1](pv)
          except Exception:
            colTyp -= 1
          else:
            break
        globalProps[pn] = colTyp
        pD[pn] = typeConversions[colTyp][1](pv)
      else:
        # Range values like '<10' or '>5' cannot be typed; keep as text.
        pD[pn] = pv
  if redraw:
    # BUG FIX: previously called AllChem.Compute2DCoords(m) with an
    # undefined name, raising NameError whenever redraw=True.
    AllChem.Compute2DCoords(mol)
  if not skipSmiles:
    row.append(Chem.MolToSmiles(mol))
  row.append(DbModule.binaryHolder(mol.ToBinary()))
  row.append(pD)
  return row
def ConvertRows(rows, globalProps, defaultVal, skipSmiles):
  """Flatten cached rows, in place, into the final column order.

  Each incoming row looks like [guid, name, (smiles,) molBinary, propDict];
  on return it is [guid, name, prop1, ..., propN, (smiles,) molBinary],
  with defaultVal substituted for any property a molecule lacks.
  """
  for idx, entry in enumerate(rows):
    propDict = entry[-1]
    flat = [entry[0], entry[1]]
    for propName in globalProps:
      flat.append(propDict.get(propName, defaultVal))
    if skipSmiles:
      flat.append(entry[2])
    else:
      flat.extend([entry[2], entry[3]])
    rows[idx] = flat
def LoadDb(suppl, dbName, nameProp='_Name', nameCol='compound_id', silent=False, redraw=False,
           errorsTo=None, keepHs=False, defaultVal='N/A', skipProps=False, regName='molecules',
           skipSmiles=False, maxRowsCached=-1, uniqNames=False, addComputedProps=False,
           lazySupplier=False, startAnew=True):
  """Load the molecules from a supplier into table `regName` of database
  `dbName`.

  The first pass over `suppl` caches up to `maxRowsCached` processed rows
  (all of them when -1) so the full set of property columns is known before
  the table is created. Any remainder of a lazy supplier is then drained
  with next() and inserted in `maxRowsCached`-sized batches.

  With `startAnew` false, rows are appended to an existing table instead;
  its columns must match and guids continue from the current max(guid).
  """
  if not lazySupplier:
    nMols = len(suppl)
  else:
    # Lazy suppliers have no length; -1 means "unknown".
    nMols = -1
  if not silent:
    logger.info(f"Generating molecular database in file {dbName}")
    if not lazySupplier:
      logger.info(f"  Processing {nMols} molecules")
  rows = []
  globalProps = {}
  namesSeen = set()
  nDone = 0
  # Candidate SQL column types, widest (varchar) to narrowest (int).
  typeConversions = {0: ('varchar', str), 1: ('float', float), 2: ('int', int)}
  # First pass: cache rows (and discover property columns) until the
  # supplier or the cache limit is exhausted.
  for m in suppl:
    nDone += 1
    if not m:
      # Unparseable molecule: optionally dump its raw text for debugging.
      if errorsTo:
        if hasattr(suppl, 'GetItemText'):
          d = suppl.GetItemText(nDone - 1)
          errorsTo.write(d)
        else:
          logger.warning('full error file support not complete')
      continue
    row = ProcessMol(m, typeConversions, globalProps, nDone, nameProp=nameProp, nameCol=nameCol,
                     redraw=redraw, keepHs=keepHs, skipProps=skipProps,
                     addComputedProps=addComputedProps, skipSmiles=skipSmiles, uniqNames=uniqNames,
                     namesSeen=namesSeen)
    if row is None:
      continue
    # Prepend the integer guid (1-based position in the input).
    rows.append([nDone] + row)
    if not silent and not nDone % 100:
      logger.info(f'  done {nDone}')
    if len(rows) == maxRowsCached:
      break
  nameDef = f'{nameCol} varchar not null'
  if uniqNames:
    nameDef += ' unique'
  typs = ['guid integer not null primary key', nameDef]
  pns = []
  for pn, v in globalProps.items():
    # Sanitize property names into legal SQL identifiers.
    addNm = re.sub(r'[\W]', '_', pn)
    typs.append(f'{addNm} {typeConversions[v][0]}')
    pns.append(pn.lower())
  if not skipSmiles:
    # Avoid a column clash if the molecules already carry a 'smiles' prop.
    if 'smiles' not in pns:
      typs.append('smiles varchar')
    else:
      typs.append('cansmiles varchar')
  typs.append(f'molpkl {DbModule.binaryTypeName}')
  conn = DbConnect(dbName)
  curs = conn.GetCursor()
  if startAnew:
    try:
      curs.execute(f'drop table {regName}')
    except Exception:
      # Table didn't exist yet; nothing to drop.
      pass
    curs.execute(f'create table {regName} ({",".join(typs)})')
  else:
    # Appending: verify the existing schema and continue guids after the
    # current maximum.
    curs.execute(f'select * from {regName} limit 1')
    ocolns = set([x[0] for x in curs.description])
    ncolns = set([x.split()[0] for x in typs])
    if ncolns != ocolns:
      raise ValueError(f'Column names do not match: {ocolns} != {ncolns}')
    curs.execute(f'select max(guid) from {regName}')
    offset = curs.fetchone()[0]
    for row in rows:
      row[0] += offset
  qs = ','.join([DbModule.placeHolder for _ in typs])
  # Insert the first batch.
  ConvertRows(rows, globalProps, defaultVal, skipSmiles)
  curs.executemany(f'insert into {regName} values ({qs})', rows)
  conn.Commit()
  rows = []
  # Second pass: drain anything left in a lazy supplier, inserting a batch
  # every maxRowsCached rows. Newly seen properties no longer add columns.
  while 1:
    nDone += 1
    try:
      m = next(suppl)
    except StopIteration:
      break
    if not m:
      if errorsTo:
        if hasattr(suppl, 'GetItemText'):
          d = suppl.GetItemText(nDone - 1)
          errorsTo.write(d)
        else:
          logger.warning('full error file support not complete')
      continue
    row = ProcessMol(m, typeConversions, globalProps, nDone, nameProp=nameProp, nameCol=nameCol,
                     redraw=redraw, keepHs=keepHs, skipProps=skipProps,
                     addComputedProps=addComputedProps, skipSmiles=skipSmiles, uniqNames=uniqNames,
                     namesSeen=namesSeen)
    if not row:
      continue
    rows.append([nDone] + row)
    if not silent and not nDone % 100:
      logger.info(f'  done {nDone}')
    if len(rows) == maxRowsCached:
      ConvertRows(rows, globalProps, defaultVal, skipSmiles)
      curs.executemany(f'insert into {regName} values ({qs})', rows)
      conn.Commit()
      rows = []
  # Flush any final partial batch.
  if len(rows):
    ConvertRows(rows, globalProps, defaultVal, skipSmiles)
    curs.executemany(f'insert into {regName} values ({qs})', rows)
    conn.Commit()
| |
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import unittest
from compiled_file_system import CompiledFileSystem
from content_providers import ContentProviders
from extensions_paths import CHROME_EXTENSIONS
from gcs_file_system_provider import CloudStorageFileSystemProvider
from object_store_creator import ObjectStoreCreator
from test_file_system import TestFileSystem
from test_util import DisableLogging
# Content-provider configuration used by the tests below. 'bananas' serves
# from the empty string (the site root); the two github-* providers are only
# referenced by the disabled tests at the bottom of this file.
_CONTENT_PROVIDERS = {
  'apples': {
    'chromium': {
      'dir': 'chrome/common/extensions/apples'
    },
    'serveFrom': 'apples-dir',
  },
  'bananas': {
    'serveFrom': '',
    'chromium': {
      'dir': 'chrome/common/extensions'
    },
  },
  'github-provider': {
    'serveFrom': 'gh',
    'github': {
      'dir': 'chrome/common/extensions',
      'owner': 'GoogleChrome',
      'repo': 'hello-world',
    },
  },
  'github-provider-with-dir': {
    'serveFrom': 'gh2',
    'github': {
      'dir': 'chrome/common/extensions/tomatoes/are/a',
      'owner': 'SomeOwner',
      'repo': 'some-repo',
    },
  },
  'tomatoes': {
    'serveFrom': 'tomatoes-dir/are/a',
    'chromium': {
      'dir': 'chrome/common/extensions/tomatoes/are/a'
    },
  },
}

# In-memory file system contents; content_providers.json is where the real
# server reads its provider configuration from.
_FILE_SYSTEM_DATA = {
  'docs': {
    'templates': {
      'json': {
        'content_providers.json': json.dumps(_CONTENT_PROVIDERS),
      },
    },
  },
  'apples': {
    'gala.txt': 'gala apples',
    'green': {
      'granny smith.txt': 'granny smith apples',
    },
  },
  'tomatoes': {
    'are': {
      'a': {
        'vegetable.txt': 'no they aren\'t',
        'fruit': {
          'cherry.txt': 'cherry tomatoes',
        },
      },
    },
  },
}
class _MockGithubFileSystemProvider(object):
'''A GithubFileSystemProvider imitation which records every call to Create
and returns them from GetAndReset.
'''
def __init__(self, file_system):
self._file_system = file_system
self._calls = []
def Create(self, owner, repo):
self._calls.append((owner, repo))
return self._file_system
def GetAndReset(self):
calls = self._calls
self._calls = []
return calls
class ContentProvidersTest(unittest.TestCase):
  '''Exercises ContentProviders name/serve-from resolution against the
  in-memory file system defined by _FILE_SYSTEM_DATA.
  '''
  def setUp(self):
    # BUG FIX: object_store_creator was assigned twice with identical
    # ObjectStoreCreator.ForTest() calls; the duplicate is removed.
    object_store_creator = ObjectStoreCreator.ForTest()
    test_file_system = TestFileSystem(_FILE_SYSTEM_DATA,
                                      relative_to=CHROME_EXTENSIONS)
    # Kept for the disabled Github tests at the bottom of this file.
    self._github_fs_provider = _MockGithubFileSystemProvider(test_file_system)
    # TODO(mangini): create tests for GCS
    self._gcs_fs_provider = CloudStorageFileSystemProvider(object_store_creator)
    self._content_providers = ContentProviders(
        object_store_creator,
        CompiledFileSystem.Factory(object_store_creator),
        test_file_system,
        self._gcs_fs_provider)

  def testSimpleRootPath(self):
    provider = self._content_providers.GetByName('apples')
    self.assertEqual(
        'gala apples',
        provider.GetContentAndType('gala.txt').Get().content)
    self.assertEqual(
        'granny smith apples',
        provider.GetContentAndType('green/granny smith.txt').Get().content)

  def testComplexRootPath(self):
    provider = self._content_providers.GetByName('tomatoes')
    self.assertEqual(
        'no they aren\'t',
        provider.GetContentAndType('vegetable.txt').Get().content)
    self.assertEqual(
        'cherry tomatoes',
        provider.GetContentAndType('fruit/cherry.txt').Get().content)

  def testParentRootPath(self):
    provider = self._content_providers.GetByName('bananas')
    self.assertEqual(
        'gala apples',
        provider.GetContentAndType('apples/gala.txt').Get().content)

  def testSimpleServlet(self):
    provider, serve_from, path = self._content_providers.GetByServeFrom(
        'apples-dir')
    self.assertEqual('apples', provider.name)
    self.assertEqual('apples-dir', serve_from)
    self.assertEqual('', path)
    provider, serve_from, path = self._content_providers.GetByServeFrom(
        'apples-dir/')
    self.assertEqual('apples', provider.name)
    self.assertEqual('apples-dir', serve_from)
    self.assertEqual('', path)
    provider, serve_from, path = self._content_providers.GetByServeFrom(
        'apples-dir/are/forever')
    self.assertEqual('apples', provider.name)
    self.assertEqual('apples-dir', serve_from)
    self.assertEqual('are/forever', path)

  def testComplexServlet(self):
    provider, serve_from, path = self._content_providers.GetByServeFrom(
        'tomatoes-dir/are/a')
    self.assertEqual('tomatoes', provider.name)
    self.assertEqual('tomatoes-dir/are/a', serve_from)
    self.assertEqual('', path)
    provider, serve_from, path = self._content_providers.GetByServeFrom(
        'tomatoes-dir/are/a/fruit/they/are')
    self.assertEqual('tomatoes', provider.name)
    self.assertEqual('tomatoes-dir/are/a', serve_from)
    self.assertEqual('fruit/they/are', path)

  def testEmptyStringServlet(self):
    # Paths that match no serveFrom prefix fall through to the provider
    # serving from '' (the site root), here 'bananas'.
    provider, serve_from, path = self._content_providers.GetByServeFrom(
        'tomatoes-dir/are')
    self.assertEqual('bananas', provider.name)
    self.assertEqual('', serve_from)
    self.assertEqual('tomatoes-dir/are', path)
    provider, serve_from, path = self._content_providers.GetByServeFrom('')
    self.assertEqual('bananas', provider.name)
    self.assertEqual('', serve_from)
    self.assertEqual('', path)

  @DisableLogging('error')
  def testProviderNotFound(self):
    self.assertEqual(None, self._content_providers.GetByName('cabbages'))

  # TODO: Re-enable these Github tests if we ever decide to restore our support
  # for Github content providers.
  # def testGithubContentProvider(self):
  #   provider, serve_from, path = self._content_providers.GetByServeFrom(
  #       'gh/apples/green/granny smith.txt')
  #   self.assertEqual('github-provider', provider.name)
  #   self.assertEqual('gh', serve_from)
  #   self.assertEqual('apples/green/granny smith.txt', path)
  #   self.assertEqual([('GoogleChrome', 'hello-world')],
  #                    self._github_fs_provider.GetAndReset())
  #   self.assertEqual(
  #       'granny smith apples',
  #       provider.GetContentAndType(path).Get().content)

  # def testGithubContentProviderWithDir(self):
  #   provider, serve_from, path = self._content_providers.GetByServeFrom(
  #       'gh2/fruit/cherry.txt')
  #   self.assertEqual('github-provider-with-dir', provider.name)
  #   self.assertEqual('gh2', serve_from)
  #   self.assertEqual('fruit/cherry.txt', path)
  #   self.assertEqual([('SomeOwner', 'some-repo')],
  #                    self._github_fs_provider.GetAndReset())
  #   self.assertEqual(
  #       'cherry tomatoes',
  #       provider.GetContentAndType(path).Get().content)
if __name__ == '__main__':
  # Allow running this suite directly from the command line.
  unittest.main()
| |
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
import six
from six.moves import http_client as http
import webob.exc
from wsme.rest import json
from glance.api import policy
from glance.api.v2.model.metadef_resource_type import ResourceType
from glance.api.v2.model.metadef_resource_type import ResourceTypeAssociation
from glance.api.v2.model.metadef_resource_type import ResourceTypeAssociations
from glance.api.v2.model.metadef_resource_type import ResourceTypes
from glance.common import exception
from glance.common import wsgi
import glance.db
import glance.gateway
from glance.i18n import _
import glance.notifier
import glance.schema
LOG = logging.getLogger(__name__)
class ResourceTypeController(object):
    """v2 API controller for metadata-definition resource types.

    Translates repository results into WSME models and maps domain
    exceptions onto the appropriate webob HTTP errors. The exception
    handlers are ordered most-specific first; keep that ordering.
    """
    def __init__(self, db_api=None, policy_enforcer=None, notifier=None):
        # Each collaborator falls back to the globally configured default.
        self.db_api = db_api or glance.db.get_api()
        self.policy = policy_enforcer or policy.Enforcer()
        self.notifier = notifier or glance.notifier.Notifier()
        self.gateway = glance.gateway.Gateway(db_api=self.db_api,
                                              notifier=self.notifier,
                                              policy_enforcer=self.policy)
    def index(self, req):
        """List every resource type (the namespace-independent catalog)."""
        try:
            # namespace=None selects resource types across all namespaces.
            filters = {'namespace': None}
            rs_type_repo = self.gateway.get_metadef_resource_type_repo(
                req.context)
            db_resource_type_list = rs_type_repo.list(filters=filters)
            resource_type_list = [ResourceType.to_wsme_model(
                resource_type) for resource_type in db_resource_type_list]
            resource_types = ResourceTypes()
            resource_types.resource_types = resource_type_list
        except exception.Forbidden as e:
            LOG.debug("User not permitted to retrieve metadata resource types "
                      "index")
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)
        except Exception as e:
            LOG.error(encodeutils.exception_to_unicode(e))
            raise webob.exc.HTTPInternalServerError(e)
        return resource_types
    def show(self, req, namespace):
        """List resource type associations of a single namespace."""
        try:
            filters = {'namespace': namespace}
            rs_type_repo = self.gateway.get_metadef_resource_type_repo(
                req.context)
            db_resource_type_list = rs_type_repo.list(filters=filters)
            resource_type_list = [ResourceTypeAssociation.to_wsme_model(
                resource_type) for resource_type in db_resource_type_list]
            resource_types = ResourceTypeAssociations()
            resource_types.resource_type_associations = resource_type_list
        except exception.Forbidden as e:
            LOG.debug("User not permitted to retrieve metadata resource types "
                      "within '%s' namespace", namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)
        except Exception as e:
            LOG.error(encodeutils.exception_to_unicode(e))
            raise webob.exc.HTTPInternalServerError(e)
        return resource_types
    def create(self, req, resource_type, namespace):
        """Associate a resource type with the given namespace."""
        rs_type_factory = self.gateway.get_metadef_resource_type_factory(
            req.context)
        rs_type_repo = self.gateway.get_metadef_resource_type_repo(req.context)
        try:
            new_resource_type = rs_type_factory.new_resource_type(
                namespace=namespace, **resource_type.to_dict())
            rs_type_repo.add(new_resource_type)
        except exception.Forbidden as e:
            LOG.debug("User not permitted to create metadata resource type "
                      "within '%s' namespace", namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)
        except exception.Duplicate as e:
            # The association already exists -> 409.
            raise webob.exc.HTTPConflict(explanation=e.msg)
        except Exception as e:
            LOG.error(encodeutils.exception_to_unicode(e))
            raise webob.exc.HTTPInternalServerError()
        return ResourceTypeAssociation.to_wsme_model(new_resource_type)
    def delete(self, req, namespace, resource_type):
        """Remove a resource type association from a namespace.

        404s (with a logged message) when no association by that name
        exists in the namespace.
        """
        rs_type_repo = self.gateway.get_metadef_resource_type_repo(req.context)
        try:
            filters = {}
            found = False
            filters['namespace'] = namespace
            db_resource_type_list = rs_type_repo.list(filters=filters)
            # Linear scan: the repo has no direct lookup by (ns, name).
            for db_resource_type in db_resource_type_list:
                if db_resource_type.name == resource_type:
                    db_resource_type.delete()
                    rs_type_repo.remove(db_resource_type)
                    found = True
            if not found:
                raise exception.NotFound()
        except exception.Forbidden as e:
            LOG.debug("User not permitted to delete metadata resource type "
                      "'%s' within '%s' namespace", resource_type, namespace)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        except exception.NotFound as e:
            msg = (_("Failed to find resource type %(resourcetype)s to "
                     "delete") % {'resourcetype': resource_type})
            LOG.error(msg)
            raise webob.exc.HTTPNotFound(explanation=msg)
        except Exception as e:
            LOG.error(encodeutils.exception_to_unicode(e))
            raise webob.exc.HTTPInternalServerError()
class RequestDeserializer(wsgi.JSONRequestDeserializer):
    """Validates and deserializes resource-type association requests."""
    # Server-managed timestamps clients may not supply.
    _disallowed_properties = ['created_at', 'updated_at']
    def __init__(self, schema=None):
        super(RequestDeserializer, self).__init__()
        self.schema = schema or get_schema()
    def _get_request_body(self, request):
        """Return the parsed JSON body; 400 when the body is missing."""
        output = super(RequestDeserializer, self).default(request)
        if 'body' not in output:
            msg = _('Body expected in request.')
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return output['body']
    @classmethod
    def _check_allowed(cls, image):
        """403 when the body contains any read-only property."""
        for key in cls._disallowed_properties:
            if key in image:
                msg = _("Attribute '%s' is read-only.") % key
                raise webob.exc.HTTPForbidden(explanation=msg)
    def create(self, request):
        """Deserialize a create request into a ResourceTypeAssociation."""
        body = self._get_request_body(request)
        self._check_allowed(body)
        try:
            self.schema.validate(body)
        except exception.InvalidObject as e:
            raise webob.exc.HTTPBadRequest(explanation=e.msg)
        resource_type = json.fromjson(ResourceTypeAssociation, body)
        return dict(resource_type=resource_type)
class ResponseSerializer(wsgi.JSONResponseSerializer):
    """Serializes controller results to JSON response bodies."""
    def __init__(self, schema=None):
        super(ResponseSerializer, self).__init__()
        self.schema = schema
    def show(self, response, result):
        # Namespace-scoped listing: associations collection.
        resource_type_json = json.tojson(ResourceTypeAssociations, result)
        body = jsonutils.dumps(resource_type_json, ensure_ascii=False)
        response.unicode_body = six.text_type(body)
        response.content_type = 'application/json'
    def index(self, response, result):
        # Global listing: plain resource types collection.
        resource_type_json = json.tojson(ResourceTypes, result)
        body = jsonutils.dumps(resource_type_json, ensure_ascii=False)
        response.unicode_body = six.text_type(body)
        response.content_type = 'application/json'
    def create(self, response, result):
        resource_type_json = json.tojson(ResourceTypeAssociation, result)
        # 201 for a successfully created association.
        response.status_int = http.CREATED
        body = jsonutils.dumps(resource_type_json, ensure_ascii=False)
        response.unicode_body = six.text_type(body)
        response.content_type = 'application/json'
    def delete(self, response, result):
        # Deletion returns no body.
        response.status_int = http.NO_CONTENT
def _get_base_properties():
    """Return the JSON-schema property definitions for a resource type
    association. created_at/updated_at are read-only server timestamps.
    """
    return {
        'name': {
            'type': 'string',
            'description': _('Resource type names should be aligned with Heat '
                             'resource types whenever possible: '
                             'http://docs.openstack.org/developer/heat/'
                             'template_guide/openstack.html'),
            'maxLength': 80,
        },
        'prefix': {
            'type': 'string',
            'description': _('Specifies the prefix to use for the given '
                             'resource type. Any properties in the namespace '
                             'should be prefixed with this prefix when being '
                             'applied to the specified resource type. Must '
                             'include prefix separator (e.g. a colon :).'),
            'maxLength': 80,
        },
        'properties_target': {
            'type': 'string',
            'description': _('Some resource types allow more than one key / '
                             'value pair per instance. For example, Cinder '
                             'allows user and image metadata on volumes. Only '
                             'the image properties metadata is evaluated by '
                             'Nova (scheduling or drivers). This property '
                             'allows a namespace target to remove the '
                             'ambiguity.'),
            'maxLength': 80,
        },
        "created_at": {
            "type": "string",
            "readOnly": True,
            "description": _("Date and time of resource type association"),
            "format": "date-time"
        },
        "updated_at": {
            "type": "string",
            "readOnly": True,
            "description": _("Date and time of the last resource type "
                             "association modification"),
            "format": "date-time"
        }
    }
def get_schema():
    """Build the schema describing one resource type association."""
    required = ResourceTypeAssociation.get_mandatory_attrs()
    return glance.schema.Schema(
        'resource_type_association',
        _get_base_properties(),
        required=required,
    )
def get_collection_schema():
    """Build the schema wrapping a list of resource type associations."""
    return glance.schema.CollectionSchema('resource_type_associations',
                                          get_schema())
def create_resource():
    """ResourceTypeAssociation resource factory method"""
    schema = get_schema()
    # Wire controller, deserializer and serializer into one WSGI resource.
    return wsgi.Resource(ResourceTypeController(),
                         RequestDeserializer(schema),
                         ResponseSerializer(schema))
| |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import glob
import os
import re
import tempfile
from oslo_log import log as logging
import pexpect
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common import utils
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
import trove.guestagent.datastore.mysql.service as dbaas
from trove.guestagent.strategies.restore import base
LOG = logging.getLogger(__name__)
class MySQLRestoreMixin(object):
    """Common utils for restoring MySQL databases."""
    RESET_ROOT_RETRY_TIMEOUT = 100
    RESET_ROOT_SLEEP_INTERVAL = 10

    # Reset the root password in a single transaction with 'FLUSH PRIVILEGES'
    # to ensure we never leave database wide open without 'grant tables'.
    RESET_ROOT_MYSQL_COMMANDS = ("START TRANSACTION;",
                                 "UPDATE `mysql`.`user` SET"
                                 " `password`=PASSWORD('')"
                                 " WHERE `user`='root'"
                                 " AND `host` = 'localhost';",
                                 "FLUSH PRIVILEGES;",
                                 "COMMIT;")
    # This is a suffix MySQL appends to the file name given in
    # the '--log-error' startup parameter.
    _ERROR_LOG_SUFFIX = '.err'
    # FIX: raw string — '\s' in a plain literal is an invalid escape
    # sequence (DeprecationWarning since Python 3.6); the compiled pattern
    # is unchanged.
    _ERROR_MESSAGE_PATTERN = re.compile(r"^ERROR:\s+.+$")

    def mysql_is_running(self):
        """Return True when mysqladmin can ping the server."""
        try:
            utils.execute_with_timeout("/usr/bin/mysqladmin", "ping")
            LOG.debug("MySQL is up and running.")
            return True
        except exception.ProcessExecutionError:
            LOG.debug("MySQL is not running.")
            return False

    def mysql_is_not_running(self):
        """Return True when no mysqld process can be found."""
        try:
            utils.execute_with_timeout("/usr/bin/pgrep", "mysqld")
            LOG.info(_("MySQL is still running."))
            return False
        except exception.ProcessExecutionError:
            LOG.debug("MySQL is not running.")
            return True

    def poll_until_then_raise(self, event, exc):
        """Poll `event` until it holds; raise `exc` on timeout."""
        try:
            utils.poll_until(event,
                             sleep_time=self.RESET_ROOT_SLEEP_INTERVAL,
                             time_out=self.RESET_ROOT_RETRY_TIMEOUT)
        except exception.PollTimeOut:
            raise exc

    def _start_mysqld_safe_with_init_file(self, init_file, err_log_file):
        """Start mysqld_safe with the given init file, verify the init
        statements ran cleanly, then shut the temporary server down.
        """
        child = pexpect.spawn("sudo mysqld_safe"
                              " --skip-grant-tables"
                              " --skip-networking"
                              " --init-file='%s'"
                              " --log-error='%s'" %
                              (init_file.name, err_log_file.name)
                              )
        try:
            i = child.expect(['Starting mysqld daemon'])
            if i == 0:
                LOG.info(_("Starting MySQL"))
        except pexpect.TIMEOUT:
            LOG.exception(_("Got a timeout launching mysqld_safe"))
        finally:
            # There is a race condition here where we kill mysqld before
            # the init file been executed. We need to ensure mysqld is up.
            #
            # mysqld_safe will start even if init-file statement(s) fail.
            # We therefore also check for errors in the log file.
            self.poll_until_then_raise(
                self.mysql_is_running,
                base.RestoreError("Reset root password failed:"
                                  " mysqld did not start!"))
            first_err_message = self._find_first_error_message(err_log_file)
            if first_err_message:
                raise base.RestoreError("Reset root password failed: %s"
                                        % first_err_message)
            LOG.info(_("Root password reset successfully."))
            LOG.debug("Cleaning up the temp mysqld process.")
            utils.execute_with_timeout("mysqladmin", "-uroot", "shutdown")
            LOG.debug("Polling for shutdown to complete.")
            try:
                utils.poll_until(self.mysql_is_not_running,
                                 sleep_time=self.RESET_ROOT_SLEEP_INTERVAL,
                                 time_out=self.RESET_ROOT_RETRY_TIMEOUT)
                LOG.debug("Database successfully shutdown")
            except exception.PollTimeOut:
                LOG.debug("Timeout shutting down database "
                          "- performing killall on mysqld_safe.")
                utils.execute_with_timeout("killall", "mysqld_safe",
                                           root_helper="sudo",
                                           run_as_root=True)
                self.poll_until_then_raise(
                    self.mysql_is_not_running,
                    base.RestoreError("Reset root password failed: "
                                      "mysqld did not stop!"))

    def reset_root_password(self):
        """Reset the password of the localhost root account used by Trove
        for initial datastore configuration.
        """
        with tempfile.NamedTemporaryFile(mode='w') as init_file:
            operating_system.chmod(init_file.name, FileMode.ADD_READ_ALL,
                                   as_root=True)
            self._writelines_one_per_line(init_file,
                                          self.RESET_ROOT_MYSQL_COMMANDS)
            # Do not attempt to delete the file as the 'trove' user.
            # The process writing into it may have assumed its ownership.
            # Only owners can delete temporary
            # files (restricted deletion).
            err_log_file = tempfile.NamedTemporaryFile(
                suffix=self._ERROR_LOG_SUFFIX,
                delete=False)
            try:
                self._start_mysqld_safe_with_init_file(init_file, err_log_file)
            finally:
                err_log_file.close()
                MySQLRestoreMixin._delete_file(err_log_file.name)

    def _writelines_one_per_line(self, fp, lines):
        """Write `lines` to `fp`, one per OS line separator, and flush."""
        fp.write(os.linesep.join(lines))
        fp.flush()

    def _find_first_error_message(self, fp):
        """Return the first ERROR line in the log file, or None."""
        if MySQLRestoreMixin._is_non_zero_file(fp):
            return MySQLRestoreMixin._find_first_pattern_match(
                fp,
                self._ERROR_MESSAGE_PATTERN
            )
        return None

    @classmethod
    def _delete_file(cls, file_path):
        """Force-remove a given file as root.
        Do not raise an exception on failure.
        """
        # FIX: the classmethod's first parameter is conventionally 'cls',
        # not 'self' (behavior is unchanged).
        if os.path.isfile(file_path):
            try:
                operating_system.remove(file_path, force=True, as_root=True)
            except Exception:
                LOG.exception(_("Could not remove file: '%s'") % file_path)

    @classmethod
    def _is_non_zero_file(cls, fp):
        """Return True when the file behind `fp` exists and is non-empty."""
        file_path = fp.name
        return os.path.isfile(file_path) and (os.path.getsize(file_path) > 0)

    @classmethod
    def _find_first_pattern_match(cls, fp, pattern):
        """Return the first line of `fp` matching `pattern`, or None."""
        for line in fp:
            if pattern.match(line):
                return line
        return None
class MySQLDump(base.RestoreRunner, MySQLRestoreMixin):
    """Implementation of Restore Strategy for MySQLDump."""
    __strategy_name__ = 'mysqldump'
    # mysqldump backups are plain SQL, so restoring is just piping the
    # stream into the client as root.
    base_restore_cmd = 'sudo mysql'
class InnoBackupEx(base.RestoreRunner, MySQLRestoreMixin):
    """Implementation of Restore Strategy for InnoBackupEx."""
    __strategy_name__ = 'innobackupex'
    # Unpack the xbstream archive directly into the data directory.
    base_restore_cmd = 'sudo xbstream -x -C %(restore_location)s'
    # Apply the transaction log so the unpacked datadir is consistent.
    base_prepare_cmd = ('sudo innobackupex'
                        ' --defaults-file=%(restore_location)s/backup-my.cnf'
                        ' --ibbackup=xtrabackup'
                        ' --apply-log'
                        ' %(restore_location)s'
                        ' 2>/tmp/innoprepare.log')
    def __init__(self, *args, **kwargs):
        self._app = None
        super(InnoBackupEx, self).__init__(*args, **kwargs)
        # kwargs must provide restore_location for the prepare command.
        self.prepare_cmd = self.base_prepare_cmd % kwargs
        self.prep_retcode = None
    @property
    def app(self):
        # Lazily built so the MySQL app is only constructed when needed.
        if self._app is None:
            self._app = self._build_app()
        return self._app
    def _build_app(self):
        return dbaas.MySqlApp(dbaas.MySqlAppStatus.get())
    def pre_restore(self):
        """Stop MySQL and empty the data directory before unpacking."""
        self.app.stop_db()
        LOG.info(_("Cleaning out restore location: %s."),
                 self.restore_location)
        operating_system.chmod(self.restore_location, FileMode.SET_FULL,
                               as_root=True)
        utils.clean_out(self.restore_location)
    def _run_prepare(self):
        LOG.debug("Running innobackupex prepare: %s.", self.prepare_cmd)
        self.prep_retcode = utils.execute(self.prepare_cmd, shell=True)
        LOG.info(_("Innobackupex prepare finished successfully."))
    def post_restore(self):
        """Prepare the restored datadir, fix ownership, then start MySQL."""
        self._run_prepare()
        operating_system.chown(self.restore_location, 'mysql', None,
                               force=True, as_root=True)
        self._delete_old_binlogs()
        self.reset_root_password()
        self.app.start_mysql()
    def _delete_old_binlogs(self):
        # Stale InnoDB log files would conflict with the prepared backup.
        files = glob.glob(os.path.join(self.restore_location, "ib_logfile*"))
        for f in files:
            os.unlink(f)
class InnoBackupExIncremental(InnoBackupEx):
    """Restore strategy for incremental InnoBackupEx backup chains."""
    __strategy_name__ = 'innobackupexincremental'
    # '--redo-only' is used for every member of the chain except the final
    # prepare, which the InnoBackupEx post_restore performs with --apply-log.
    incremental_prep = ('sudo innobackupex'
                        ' --defaults-file=%(restore_location)s/backup-my.cnf'
                        ' --ibbackup=xtrabackup'
                        ' --apply-log'
                        ' --redo-only'
                        ' %(restore_location)s'
                        ' %(incremental_args)s'
                        ' 2>/tmp/innoprepare.log')
    def __init__(self, *args, **kwargs):
        super(InnoBackupExIncremental, self).__init__(*args, **kwargs)
        self.restore_location = kwargs.get('restore_location')
        # Total bytes unpacked across the whole backup chain.
        self.content_length = 0
    def _incremental_restore_cmd(self, incremental_dir):
        """Return a command for a restore with a incremental location."""
        args = {'restore_location': incremental_dir}
        return (self.decrypt_cmd +
                self.unzip_cmd +
                (self.base_restore_cmd % args))
    def _incremental_prepare_cmd(self, incremental_dir):
        # incremental_dir is None for the full (root) backup of the chain.
        if incremental_dir is not None:
            incremental_arg = '--incremental-dir=%s' % incremental_dir
        else:
            incremental_arg = ''
        args = {
            'restore_location': self.restore_location,
            'incremental_args': incremental_arg,
        }
        return self.incremental_prep % args
    def _incremental_prepare(self, incremental_dir):
        prepare_cmd = self._incremental_prepare_cmd(incremental_dir)
        LOG.debug("Running innobackupex prepare: %s.", prepare_cmd)
        utils.execute(prepare_cmd, shell=True)
        LOG.info(_("Innobackupex prepare finished successfully."))
    def _incremental_restore(self, location, checksum):
        """Recursively apply backups from all parents.
        If we are the parent then we restore to the restore_location and
        we apply the logs to the restore_location only.
        Otherwise if we are an incremental we restore to a subfolder to
        prevent stomping on the full restore data. Then we run apply log
        with the '--incremental-dir' flag
        """
        metadata = self.storage.load_metadata(location, checksum)
        incremental_dir = None
        if 'parent_location' in metadata:
            LOG.info(_("Restoring parent: %(parent_location)s"
                       " checksum: %(parent_checksum)s.") % metadata)
            parent_location = metadata['parent_location']
            parent_checksum = metadata['parent_checksum']
            # Restore parents recursively so backup are applied sequentially
            self._incremental_restore(parent_location, parent_checksum)
            # for *this* backup set the incremental_dir
            # just use the checksum for the incremental path as it is
            # sufficiently unique /var/lib/mysql/<checksum>
            incremental_dir = os.path.join(
                cfg.get_configuration_property('mount_point'), checksum)
            operating_system.create_directory(incremental_dir, as_root=True)
            command = self._incremental_restore_cmd(incremental_dir)
        else:
            # The parent (full backup) use the same command from InnobackupEx
            # super class and do not set an incremental_dir.
            command = self.restore_cmd
        self.content_length += self._unpack(location, checksum, command)
        self._incremental_prepare(incremental_dir)
        # Delete unpacked incremental backup metadata
        if incremental_dir:
            operating_system.remove(incremental_dir, force=True, as_root=True)
    def _run_restore(self):
        """Run incremental restore.
        First grab all parents and prepare them with '--redo-only'. After
        all backups are restored the super class InnoBackupEx post_restore
        method is called to do the final prepare with '--apply-log'
        """
        self._incremental_restore(self.location, self.checksum)
        return self.content_length
| |
u'''
Created on Dec 16, 2010
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
import os, re
from collections import defaultdict
from lxml import etree
from arelle import UrlUtil
from arelle.PluginManager import pluginClassMethods
from arelle.UrlUtil import isHttpUrl
def compileAttrPattern(elt, attrName, flags=None):
    """Compile the value of attribute *attrName* on *elt* into a regex.

    A missing attribute compiles as the empty pattern. *flags*, when
    given, are forwarded to re.compile.
    """
    value = elt.get(attrName)
    if value is None:
        value = u""
    if flags is None:
        return re.compile(value)
    return re.compile(value, flags)
class ErxlLoc(object):
    """Plain record describing one ERXL taxonomy location entry.

    Stores the constructor arguments verbatim as same-named attributes.
    """
    def __init__(self, family, version, href, attType, elements, namespace):
        self.namespace = namespace
        self.elements = elements
        self.attType = attType
        self.href = href
        self.version = version
        self.family = family
class DisclosureSystem(object):
    def __init__(self, modelManager):
        # Keep a back-reference to the owning model manager, then start
        # from a clean (unselected) configuration via clear().
        self.modelManager = modelManager
        self.clear()
def clear(self):
self.selection = None
self.standardTaxonomiesDict = {}
self.familyHrefs = {}
self.standardLocalHrefs = set()
self.standardAuthorities = set()
self.baseTaxonomyNamespaces = set()
self.standardPrefixes = {}
self.names = None
self.name = None
self.validationType = None
self.EFM = False
self.GFM = False
self.EFMorGFM = False
self.HMRC = False
self.SBRNL = False
for pluginXbrlMethod in pluginClassMethods(u"DisclosureSystem.Types"):
for typeName, typeTestVariable in pluginXbrlMethod(self):
setattr(self, typeTestVariable, False)
self.validateFileText = False
self.schemaValidateSchema = None
self.blockDisallowedReferences = False
self.maxSubmissionSubdirectoryEntryNesting = 0
self.defaultXmlLang = None
self.xmlLangPattern = None
self.defaultLanguage = None
self.language = None
self.standardTaxonomiesUrl = None
self.mappingsUrl = os.path.join(self.modelManager.cntlr.configDir, u"mappings.xml")
self.mappedFiles = {}
self.mappedPaths = []
self.utrUrl = u"http://www.xbrl.org/utr/utr.xml"
self.utrTypeEntries = None
self.identifierSchemePattern = None
self.identifierValuePattern = None
self.identifierValueName = None
self.contextElement = None
self.roleDefinitionPattern = None
self.labelCheckPattern = None
self.labelTrimPattern = None
self.deiNamespacePattern = None
self.deiAmendmentFlagElement = None
self.deiCurrentFiscalYearEndDateElement = None
self.deiDocumentFiscalYearFocusElement = None
self.deiDocumentPeriodEndDateElement = None
self.deiFilerIdentifierElement = None
self.deiFilerNameElement = None
self.logLevelFilter = None
self.logCodeFilter = None
self.version = (0,0,0)
@property
def dir(self):
return self.dirlist(u"dir")
@property
def urls(self):
_urls = [os.path.join(self.modelManager.cntlr.configDir, u"disclosuresystems.xml")]
# get custom config xml file url
for pluginXbrlMethod in pluginClassMethods(u"DisclosureSystem.ConfigURL"):
_urls.append(pluginXbrlMethod(self))
return _urls
@property
def url(self): # needed for status messages (not used in this module)
return u", ".join(os.path.basename(url) for url in self.urls)
def dirlist(self, listFormat):
self.modelManager.cntlr.showStatus(_(u"parsing disclosuresystems.xml"))
namepaths = []
try:
for url in self.urls:
xmldoc = etree.parse(url)
for dsElt in xmldoc.iter(tag=u"DisclosureSystem"):
if dsElt.get(u"names"):
names = dsElt.get(u"names").split(u"|")
if listFormat == u"help": # terse help
namepaths.append(u'{0}: {1}'.format(names[-1],names[0]))
elif listFormat == u"help-verbose":
namepaths.append(u'{0}: {1}\n{2}\n'.format(names[-1],
names[0],
dsElt.get(u"description").replace(u'\\n',u'\n')))
elif listFormat == u"dir":
namepaths.append((names[0],
dsElt.get(u"description")))
except (EnvironmentError,
etree.LxmlError), err:
self.modelManager.cntlr.addToLog(u"disclosuresystems.xml: import error: {0}".format(err))
self.modelManager.cntlr.showStatus(u"")
return namepaths
def select(self, name):
self.clear()
status = _(u"loading disclosure system and mappings")
try:
if name:
isSelected = False
for url in self.urls:
xmldoc = etree.parse(url)
for dsElt in xmldoc.iter(tag=u"DisclosureSystem"):
namesStr = dsElt.get(u"names")
if namesStr:
names = namesStr.split(u"|")
if name in names:
self.names = names
self.name = self.names[0]
self.validationType = dsElt.get(u"validationType")
self.EFM = self.validationType == u"EFM"
self.GFM = self.validationType == u"GFM"
self.EFMorGFM = self.EFM or self.GFM
self.HMRC = self.validationType == u"HMRC"
self.SBRNL = self.validationType == u"SBR-NL"
for pluginXbrlMethod in pluginClassMethods(u"DisclosureSystem.Types"):
for typeName, typeTestVariable in pluginXbrlMethod(self):
setattr(self, typeTestVariable, self.validationType == typeName)
self.validateFileText = dsElt.get(u"validateFileText") == u"true"
self.blockDisallowedReferences = dsElt.get(u"blockDisallowedReferences") == u"true"
try:
self.maxSubmissionSubdirectoryEntryNesting = int(dsElt.get(u"maxSubmissionSubdirectoryEntryNesting"))
except (ValueError, TypeError):
self.maxSubmissionSubdirectoryEntryNesting = 0
self.defaultXmlLang = dsElt.get(u"defaultXmlLang")
self.xmlLangPattern = compileAttrPattern(dsElt,u"xmlLangPattern")
self.defaultLanguage = dsElt.get(u"defaultLanguage")
self.standardTaxonomiesUrl = self.modelManager.cntlr.webCache.normalizeUrl(
dsElt.get(u"standardTaxonomiesUrl"),
url)
if dsElt.get(u"mappingsUrl"):
self.mappingsUrl = self.modelManager.cntlr.webCache.normalizeUrl(
dsElt.get(u"mappingsUrl"),
url)
if dsElt.get(u"utrUrl"): # may be mapped by mappingsUrl entries, see below
self.utrUrl = self.modelManager.cntlr.webCache.normalizeUrl(
dsElt.get(u"utrUrl"),
url)
self.identifierSchemePattern = compileAttrPattern(dsElt,u"identifierSchemePattern")
self.identifierValuePattern = compileAttrPattern(dsElt,u"identifierValuePattern")
self.identifierValueName = dsElt.get(u"identifierValueName")
self.contextElement = dsElt.get(u"contextElement")
self.roleDefinitionPattern = compileAttrPattern(dsElt,u"roleDefinitionPattern")
self.labelCheckPattern = compileAttrPattern(dsElt,u"labelCheckPattern", re.DOTALL)
self.labelTrimPattern = compileAttrPattern(dsElt,u"labelTrimPattern", re.DOTALL)
self.deiNamespacePattern = compileAttrPattern(dsElt,u"deiNamespacePattern")
self.deiAmendmentFlagElement = dsElt.get(u"deiAmendmentFlagElement")
self.deiCurrentFiscalYearEndDateElement = dsElt.get(u"deiCurrentFiscalYearEndDateElement")
self.deiDocumentFiscalYearFocusElement = dsElt.get(u"deiDocumentFiscalYearFocusElement")
self.deiDocumentPeriodEndDateElement = dsElt.get(u"deiDocumentPeriodEndDateElement")
self.deiFilerIdentifierElement = dsElt.get(u"deiFilerIdentifierElement")
self.deiFilerNameElement = dsElt.get(u"deiFilerNameElement")
self.logLevelFilter = dsElt.get(u"logLevelFilter")
self.logCodeFilter = dsElt.get(u"logCodeFilter")
self.selection = self.name
isSelected = True
break
if isSelected:
break
self.loadMappings()
self.utrUrl = self.mappedUrl(self.utrUrl) # utr may be mapped, change to its mapped entry
self.loadStandardTaxonomiesDict()
self.utrTypeEntries = None # clear any prior loaded entries
# set log level filters (including resetting prior disclosure systems values if no such filter)
self.modelManager.cntlr.setLogLevelFilter(self.logLevelFilter) # None or "" clears out prior filter if any
self.modelManager.cntlr.setLogCodeFilter(self.logCodeFilter)
status = _(u"loaded")
result = True
except (EnvironmentError,
etree.LxmlError), err:
status = _(u"exception during loading")
result = False
self.modelManager.cntlr.addToLog(u"disclosuresystems.xml: import error: {0}".format(err))
etree.clear_error_log()
self.modelManager.cntlr.showStatus(_(u"Disclosure system and mappings {0}: {1}").format(status,name), 3500)
return result
def loadStandardTaxonomiesDict(self):
if self.selection:
self.standardTaxonomiesDict = defaultdict(set)
self.familyHrefs = defaultdict(set)
self.standardLocalHrefs = defaultdict(set)
self.standardAuthorities = set()
self.standardPrefixes = {}
if not self.standardTaxonomiesUrl:
return
basename = os.path.basename(self.standardTaxonomiesUrl)
self.modelManager.cntlr.showStatus(_(u"parsing {0}").format(basename))
file = None
try:
from arelle.FileSource import openXmlFileStream
for filepath in (self.standardTaxonomiesUrl,
os.path.join(self.modelManager.cntlr.configDir,u"xbrlschemafiles.xml")):
file = openXmlFileStream(self.modelManager.cntlr, filepath, stripDeclaration=True)[0]
xmldoc = etree.parse(file)
file.close()
for erxlElt in xmldoc.iter(tag=u"Erxl"):
v = erxlElt.get(u"version")
if v and re.match(ur"[0-9]+([.][0-9]+)*$", v):
vSplit = v.split(u'.') # at least 3 digits always!
self.version = tuple(int(n) for n in vSplit) + tuple(0 for n in xrange(3 - len(vSplit)))
break
for locElt in xmldoc.iter(tag=u"Loc"):
href = None
localHref = None
namespaceUri = None
prefix = None
attType = None
family = None
elements = None
version = None
for childElt in locElt.iterchildren():
ln = childElt.tag
value = childElt.text.strip()
if ln == u"Href":
href = value
elif ln == u"LocalHref":
localHref = value
elif ln == u"Namespace":
namespaceUri = value
elif ln == u"Prefix":
prefix = value
elif ln == u"AttType":
attType = value
elif ln == u"Family":
family = value
elif ln == u"Elements":
elements = value
elif ln == u"Version":
version = value
if href:
if namespaceUri and (attType == u"SCH" or attType == u"ENT"):
self.standardTaxonomiesDict[namespaceUri].add(href)
if localHref:
self.standardLocalHrefs[namespaceUri].add(localHref)
authority = UrlUtil.authority(namespaceUri)
self.standardAuthorities.add(authority)
if family == u"BASE":
self.baseTaxonomyNamespaces.add(namespaceUri)
if prefix:
self.standardPrefixes[namespaceUri] = prefix
if href not in self.standardTaxonomiesDict:
self.standardTaxonomiesDict[href] = u"Allowed" + attType
if family:
self.familyHrefs[family].add(ErxlLoc(family, version, href, attType, elements, namespaceUri))
elif attType == u"SCH" and family == u"BASE":
self.baseTaxonomyNamespaces.add(namespaceUri)
except (EnvironmentError,
etree.LxmlError), err:
self.modelManager.cntlr.addToLog(u"{0}: import error: {1}".format(basename,err))
etree.clear_error_log()
if file:
file.close()
def loadMappings(self):
basename = os.path.basename(self.mappingsUrl)
self.modelManager.cntlr.showStatus(_(u"parsing {0}").format(basename))
try:
xmldoc = etree.parse(self.mappingsUrl)
for elt in xmldoc.iter(tag=u"mapFile"):
self.mappedFiles[elt.get(u"from")] = elt.get(u"to")
for elt in xmldoc.iter(tag=u"mapPath"):
self.mappedPaths.append((elt.get(u"from"), elt.get(u"to")))
except (EnvironmentError,
etree.LxmlError), err:
self.modelManager.cntlr.addToLog(u"{0}: import error: {1}".format(basename,err))
etree.clear_error_log()
def mappedUrl(self, url):
if url in self.mappedFiles:
mappedUrl = self.mappedFiles[url]
else: # handle mapped paths
mappedUrl = url
for mapFrom, mapTo in self.mappedPaths:
if url.startswith(mapFrom):
mappedUrl = mapTo + url[len(mapFrom):]
break
return mappedUrl
def uriAuthorityValid(self, uri):
return UrlUtil.authority(uri) in self.standardAuthorities
def disallowedHrefOfNamespace(self, href, namespaceUri):
if namespaceUri in self.standardTaxonomiesDict:
if href in self.standardTaxonomiesDict[namespaceUri]:
return False
if namespaceUri in self.standardLocalHrefs and not isHttpUrl(href):
normalizedHref = href.replace(u"\\",u"/")
if any(normalizedHref.endswith(localHref)
for localHref in self.standardLocalHrefs[namespaceUri]):
return False
return False
def hrefValid(self, href):
return href in self.standardTaxonomiesDict
| |
from urlparse import urlparse
from datetime import datetime
import json
import os
import re
import uuid
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core import cache
from django.core.cache import InvalidCacheBackendError
from django.template.loader import render_to_string
from django.views.decorators.http import require_POST
from django.contrib.auth.forms import AdminPasswordChangeForm
from django.contrib.auth.models import User
from django.contrib.auth.views import login as django_login, redirect_to_login
from django.contrib.auth.views import logout as django_logout
from django.contrib.sites.models import Site
from django.http import HttpResponseRedirect, HttpResponse, Http404,\
HttpResponseServerError, HttpResponseNotFound, HttpResponseBadRequest,\
HttpResponseForbidden
from django.shortcuts import redirect, render
from django.views.generic import TemplateView
from couchdbkit import ResourceNotFound
from django.utils.translation import ugettext as _, ugettext_noop
from django.core.urlresolvers import reverse
from django.core.mail.message import EmailMessage
from django.template import loader
from django.template.context import RequestContext
from restkit import Resource
from corehq.apps.accounting.models import Subscription
from corehq.apps.announcements.models import Notification
from corehq.apps.app_manager.models import BUG_REPORTS_DOMAIN
from corehq.apps.app_manager.models import import_app
from corehq.apps.domain.decorators import require_superuser,\
login_and_domain_required
from corehq.apps.domain.utils import normalize_domain_name, get_domain_from_url
from corehq.apps.hqwebapp.encoders import LazyEncoder
from corehq.apps.hqwebapp.forms import EmailAuthenticationForm, CloudCareAuthenticationForm
from corehq.apps.receiverwrapper.models import Repeater
from corehq.apps.reports.util import is_mobile_worker_with_report_access
from corehq.apps.users.models import CouchUser
from corehq.apps.users.util import format_username
from corehq.apps.hqwebapp.doc_info import get_doc_info
from dimagi.utils.couch.database import get_db
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.logging import notify_exception
from dimagi.utils.web import get_url_base, json_response
from corehq.apps.domain.models import Domain
from couchforms.models import XFormInstance
from soil import heartbeat
from soil import views as soil_views
def pg_check():
    """check django db

    Returns (ok, None) where ok is True when a simple count query against
    the auth User table succeeds.
    """
    try:
        user_count = User.objects.count()
    # narrowed from a bare except: still best-effort, but no longer
    # swallows SystemExit/KeyboardInterrupt
    except Exception:
        user_count = None
    return (user_count is not None, None)
def couch_check():
    """check couch

    Returns (ok, None) where ok is True when a trivial couch view query
    returns a list.
    """
    # in reality when things go wrong with couch and postgres (as of this
    # writing) - it's far from graceful, so this will likely never be
    # reached because another exception will fire first - but for
    # completeness sake, this check is done here to verify our calls will
    # work, and if other error handling allows the request to get this far.
    try:
        xforms = XFormInstance.view('reports_forms/all_forms', limit=1).all()
    except Exception:  # narrowed from a bare except
        xforms = None
    return (isinstance(xforms, list), None)
def hb_check():
    """check the celery heartbeat, via the flower monitoring API if
    CELERY_FLOWER_URL is configured.

    Returns (ok, message) where message lists any down workers reported by
    flower, or None.
    """
    celery_monitoring = getattr(settings, 'CELERY_FLOWER_URL', None)
    if celery_monitoring:
        try:
            cresource = Resource(celery_monitoring, timeout=3)
            t = cresource.get("api/workers").body_string()
            all_workers = json.loads(t)
            bad_workers = []
            for hostname, w in all_workers.items():
                if not w['status']:
                    bad_workers.append('* {} celery worker down'.format(hostname))
            if bad_workers:
                return (False, '\n'.join(bad_workers))
            else:
                hb = heartbeat.is_alive()
        except Exception:  # narrowed from a bare except
            hb = False
    else:
        try:
            hb = heartbeat.is_alive()
        except Exception:  # narrowed from a bare except
            hb = False
    return (hb, None)
def redis_check():
    """check redis, when it is configured

    Returns (ok, None); a missing/misconfigured redis backend counts as
    success because redis is optional.
    """
    try:
        redis = cache.get_cache('redis')
        result = redis.set('serverup_check_key', 'test')
    except (InvalidCacheBackendError, ValueError):
        result = True  # redis not in use, ignore
    except Exception:  # narrowed from a bare except
        result = False
    return (result, None)
def memcached_check():
    """check memcached by round-tripping a unique value through the
    default cache.

    Returns (ok, None).
    """
    try:
        memcached = cache.get_cache('default')
        uuid_val = uuid.uuid1()
        memcached.set('serverup_check_key', uuid_val)
        result = memcached.get('serverup_check_key') == uuid_val
    except Exception:  # narrowed from a bare except
        result = False
    return (result, None)
def server_error(request, template_name='500.html'):
    """
    500 error handler.
    """
    domain = get_domain_from_url(request.path) or ''
    # hat tip: http://www.arthurkoziel.com/2009/01/15/passing-mediaurl-djangos-500-error-view/
    template = loader.get_template(template_name)
    context = RequestContext(request, {
        'MEDIA_URL': settings.MEDIA_URL,
        'STATIC_URL': settings.STATIC_URL,
        'domain': domain
    })
    return HttpResponseServerError(template.render(context))
def not_found(request, template_name='404.html'):
    """
    404 error handler.
    """
    context = RequestContext(request, {
        'MEDIA_URL': settings.MEDIA_URL,
        'STATIC_URL': settings.STATIC_URL
    })
    return HttpResponseNotFound(loader.get_template(template_name).render(context))
def redirect_to_default(req, domain=None):
    """Redirect the user to the most appropriate landing page.

    Anonymous users go to the (domain-specific) login page.  Authenticated
    users go to their single domain's default page, the domain-select page
    when they belong to several, or domain registration when they have none.
    """
    if not req.user.is_authenticated():
        if domain is not None:  # idiom fix: was `domain != None`
            url = reverse('domain_login', args=[domain])
        else:
            # this actually gets hijacked by the static site, but is necessary
            url = reverse('corehq.apps.hqwebapp.views.landing_page')
    else:
        if domain:
            domain = normalize_domain_name(domain)
            domains = [Domain.get_by_name(domain)]
        else:
            domains = Domain.active_for_user(req.user)
        if not domains and not req.user.is_superuser:
            return redirect('registration_domain')
        elif len(domains) == 1:
            if domains[0]:
                domain = domains[0].name
                if req.couch_user.is_commcare_user():
                    # mobile workers without report access use CloudCare
                    if not is_mobile_worker_with_report_access(
                            req.couch_user, domain):
                        url = reverse("cloudcare_main", args=[domain, ""])
                    else:
                        url = reverse("saved_reports", args=[domain])
                elif req.couch_user.can_view_reports(domain) or req.couch_user.get_viewable_reports(domain):
                    url = reverse('corehq.apps.reports.views.default', args=[domain])
                else:
                    url = reverse('corehq.apps.app_manager.views.default', args=[domain])
            else:
                # an explicitly requested domain that doesn't exist
                raise Http404
        else:
            # superuser with no domains, or a member of several domains
            url = settings.DOMAIN_SELECT_URL
    return HttpResponseRedirect(url)
def landing_page(req, template_name="home.html"):
    # This view (like login below) is overridden so that the base template
    # is set somewhere the login page can access it.
    if req.user.is_authenticated():
        return HttpResponseRedirect(reverse('homepage'))
    req.base_template = settings.BASE_TEMPLATE
    return django_login(req, template_name=template_name,
                        authentication_form=EmailAuthenticationForm)
def yui_crossdomain(req):
    # Serve a Flash cross-domain policy permitting YUI's CDN plus this
    # site's own domain (from the Sites framework).
    x_domain = """<?xml version="1.0"?>
    <!DOCTYPE cross-domain-policy SYSTEM "http://www.macromedia.com/xml/dtds/cross-domain-policy.dtd">
    <cross-domain-policy>
        <allow-access-from domain="yui.yahooapis.com"/>
        <allow-access-from domain="%s"/>
        <site-control permitted-cross-domain-policies="master-only"/>
    </cross-domain-policy>""" % Site.objects.get(id=settings.SITE_ID).domain
    return HttpResponse(x_domain, mimetype="application/xml")
@login_required()
def password_change(req):
    """Let the logged-in user change their own password via the admin
    password-change form; redirects to '/' on success."""
    user_to_edit = User.objects.get(id=req.user.id)
    if req.method == 'POST':
        password_form = AdminPasswordChangeForm(user_to_edit, req.POST)
        if password_form.is_valid():
            password_form.save()
            return HttpResponseRedirect('/')
    else:
        password_form = AdminPasswordChangeForm(user_to_edit)
    # on an invalid POST the bound form (with errors) is re-rendered
    return render(req, "password_change.html", {"form": password_form})
def server_up(req):
    '''View that just returns "success", which can be hooked into server
    monitoring tools like: pingdom'''
    checkers = {
        "heartbeat": {
            "always_check": False,
            "message": "* celery heartbeat is down",
            "check_func": hb_check
        },
        "postgres": {
            "always_check": True,
            "message": "* postgres has issues",
            "check_func": pg_check
        },
        "couch": {
            "always_check": True,
            "message": "* couch has issues",
            "check_func": couch_check
        },
        "redis": {
            "always_check": True,
            "message": "* redis has issues",
            "check_func": redis_check
        },
        "memcached": {
            "always_check": True,
            "message": "* memcached has issues",
            "check_func": memcached_check
        }
    }
    failures = []
    for check_name, check_info in checkers.items():
        # optional checks run only when requested via a query parameter
        requested = req.GET.get(check_name, None) is not None
        if not (check_info['always_check'] or requested):
            continue
        check_results, custom_msg = check_info['check_func']()
        if not check_results:
            failures.append(custom_msg if custom_msg else check_info['message'])
    if failures:
        message = ['Problems with HQ (%s):' % os.uname()[1]] + failures
        return HttpResponse('<br>'.join(message), status=500)
    return HttpResponse("success")
def no_permissions(request, redirect_to=None, template_name="403.html"):
    """
    403 error handler.

    ``redirect_to`` is accepted for signature compatibility but unused.
    """
    context = RequestContext(request, {
        'MEDIA_URL': settings.MEDIA_URL,
        'STATIC_URL': settings.STATIC_URL
    })
    return HttpResponseForbidden(loader.get_template(template_name).render(context))
def _login(req, domain, template_name):
    # Shared implementation behind login() and domain_login().
    # Already-authenticated GETs are bounced straight to 'next', the domain
    # homepage, or the general homepage.
    if req.user.is_authenticated() and req.method != "POST":
        redirect_to = req.REQUEST.get('next', '')
        if redirect_to:
            return HttpResponseRedirect(redirect_to)
        if not domain:
            return HttpResponseRedirect(reverse('homepage'))
        else:
            return HttpResponseRedirect(reverse('domain_homepage', args=[domain]))
    # On a domain login POST, qualify a bare username with the domain via
    # format_username; the '@' default means a missing username is left
    # untouched.  req.POST must be made temporarily mutable for this.
    if req.method == 'POST' and domain and '@' not in req.POST.get('username', '@'):
        req.POST._mutable = True
        req.POST['username'] = format_username(req.POST['username'], domain)
        req.POST._mutable = False
    req.base_template = settings.BASE_TEMPLATE
    context = {}
    if domain:
        context.update({
            'domain': domain,
            'next': req.REQUEST.get('next', '/a/%s/' % domain),
        })
    # Domain logins use the CloudCare auth form; general logins the email one.
    return django_login(req, template_name=template_name,
                        authentication_form=EmailAuthenticationForm if not domain else CloudCareAuthenticationForm,
                        extra_context=context)
def login(req, domain_type='commcare'):
    # This view (like landing_page above) is overridden so the base
    # template can be set somewhere the login page can access it.
    domain = req.REQUEST.get('domain', None)
    from corehq.apps.domain.utils import get_dummy_domain
    # For showing different logos based on CommTrack, CommConnect, CommCare...
    req.project = get_dummy_domain(domain_type)
    return _login(req, domain, "login_and_password/login.html")
def domain_login(req, domain, template_name="login_and_password/login.html"):
    """Log in against a specific project space; 404 for unknown domains."""
    project = Domain.get_by_name(domain)
    if not project:
        raise Http404
    # FYI, the domain context_processor picks req.project up and applies
    # the necessary domain contexts.
    req.project = project
    return _login(req, domain, template_name)
def is_mobile_url(url):
    # Minor hack: mobile report URLs are recognized purely by substring.
    return 'reports/custom/mobile' in url
def logout(req):
    # Log the user out, then redirect to a login page appropriate to where
    # they came from: mobile report login, domain login, or generic login.
    referer = req.META.get('HTTP_REFERER')
    domain = get_domain_from_url(urlparse(referer).path) if referer else None
    # we don't actually do anything with the response here:
    django_logout(req, **{"template_name": settings.BASE_TEMPLATE})
    if referer and domain and is_mobile_url(referer):
        mobile_mainnav_url = reverse('custom_project_report_dispatcher', args=[domain, 'mobile/mainnav'])
        mobile_login_url = reverse('domain_mobile_login', kwargs={'domain': domain})
        return HttpResponseRedirect('%s?next=%s' % (mobile_login_url, mobile_mainnav_url))
    elif referer and domain:
        domain_login_url = reverse('domain_login', kwargs={'domain': domain})
        return HttpResponseRedirect('%s' % domain_login_url)
    else:
        return HttpResponseRedirect(reverse('login'))
@login_and_domain_required
def retrieve_download(req, domain, download_id, template="hqwebapp/file_download.html"):
    # Domain-authenticated thin wrapper around soil's download view; the
    # domain argument is only used by the decorator.
    return soil_views.retrieve_download(req, download_id, template)
@require_superuser
def debug_notify(request):
    # Deliberately trigger and capture an exception to demonstrate/test the
    # notify_exception error-mailing path without returning a 500.
    try:
        0 / 0
    except ZeroDivisionError:
        notify_exception(request,
            "If you want to achieve a 500-style email-out but don't want the user to see a 500, use notify_exception(request[, message])")
    return HttpResponse("Email should have been sent")
@login_required()
@require_POST
def bug_report(req):
    """Email a user-submitted issue report (with optional attachment and a
    copy of the app) to the configured bug-report recipients."""
    report = dict([(key, req.POST.get(key, '')) for key in (
        'subject',
        'username',
        'domain',
        'url',
        'message',
        'app_id',
        'cc'
    )])
    # robustness fix: tolerate clients that send no User-Agent header
    report['user_agent'] = req.META.get('HTTP_USER_AGENT', '')
    report['datetime'] = datetime.utcnow()
    if report['app_id']:
        app = import_app(report['app_id'], BUG_REPORTS_DOMAIN)
        report['copy_url'] = "%s%s" % (get_url_base(), reverse('view_app', args=[BUG_REPORTS_DOMAIN, app.id]))
    else:
        report['copy_url'] = None
    try:
        couch_user = CouchUser.get_by_username(report['username'])
        full_name = couch_user.full_name
        email = couch_user.get_email()
    except Exception:
        full_name = None
        email = None
    report['full_name'] = full_name
    report['email'] = email or report['username']
    matching_subscriptions = Subscription.objects.filter(
        is_active=True,
        subscriber__domain=report['domain'],
    )
    if len(matching_subscriptions) >= 1:
        report['software_plan'] = matching_subscriptions[0].plan_version
    else:
        report['software_plan'] = u'domain has no active subscription'
    subject = u'{subject} ({domain})'.format(**report)
    message = (
        u"username: {username}\n"
        u"full name: {full_name}\n"
        u"domain: {domain}\n"
        u"software plan: {software_plan}\n"
        u"url: {url}\n"
        u"copy url: {copy_url}\n"
        u"datetime: {datetime}\n"
        u"User Agent: {user_agent}\n"
        u"Message:\n\n"
        u"{message}\n"
    ).format(**report)
    cc = report['cc'].strip().split(",")
    cc = filter(None, cc)
    if full_name and not any([c in full_name for c in '<>"']):
        reply_to = u'"{full_name}" <{email}>'.format(**report)
    else:
        reply_to = report['email']
    # if the person looks like a commcare user, fogbugz can't reply
    # to their email, so just use the default
    if settings.HQ_ACCOUNT_ROOT in reply_to:
        reply_to = settings.SERVER_EMAIL
    if req.POST.get('five-hundred-report'):
        # typo fix: "messge" -> "message"
        message = "%s \n\n This message was reported from a 500 error page! Please fix this ASAP (as if you wouldn't anyway)..." % message
    email = EmailMessage(
        subject=subject,
        body=message,
        to=settings.BUG_REPORT_RECIPIENTS,
        headers={'Reply-To': reply_to},
        cc=cc
    )
    uploaded_file = req.FILES.get('report_issue')
    if uploaded_file:
        filename = uploaded_file.name
        content = uploaded_file.read()
        email.attach(filename=filename, content=content)
    # only fake the from email if it's an @dimagi.com account
    # (raw string so the regex backslash escape is unambiguous)
    if re.search(r'@dimagi\.com$', report['username']):
        email.from_email = report['username']
    else:
        email.from_email = settings.CCHQ_BUG_REPORT_EMAIL
    email.send(fail_silently=False)
    if req.POST.get('five-hundred-report'):
        messages.success(req,
            "Your CommCare HQ Issue Report has been sent. We are working quickly to resolve this problem.")
        return HttpResponseRedirect(reverse('homepage'))
    return HttpResponse()
@login_required()
@require_POST
def dismiss_notification(request):
    """Mark a notification as dismissed for the user who owns it."""
    note = Notification.get(request.POST.get('note_id', None))
    if not note:
        return json_response({"status": "failure: No note by that name"})
    # only the owner may dismiss their own notification
    if note.user != request.couch_user.username:
        return json_response({"status": "failure: Not the same user"})
    note.dismissed = True
    note.save()
    return json_response({"status": "success"})
def render_static(request, template):
    """
    Takes an html file and renders it Commcare HQ's styling
    (i.e. wraps the given template inside the blank HQ page chrome).
    """
    return render(request, "hqwebapp/blank.html", {'tmpl': template})
def eula(request):
    """Static End User License Agreement page."""
    return render_static(request, "eula.html")
def cda(request):
    """Static Content Distribution Agreement page."""
    return render_static(request, "cda.html")
def apache_license(request):
    """Static Apache license page."""
    return render_static(request, "apache_license.html")
def bsd_license(request):
    """Static BSD license page."""
    return render_static(request, "bsd_license.html")
def product_agreement(request):
    """Static product agreement page."""
    return render_static(request, "product_agreement.html")
def unsubscribe(request, user_id):
    # todo in the future we should not require a user to be logged in to unsubscribe.
    # Currently just points the user at the opt-out checkbox on their
    # account settings page; user_id is unused here.
    from django.contrib import messages
    from corehq.apps.settings.views import MyAccountSettingsView
    messages.info(request,
                  _('Check "Opt out of emails about new features '
                    'and other CommCare updates" in your account '
                    'settings and then click "Update Information" '
                    'if you do not want to receive future emails '
                    'from us.'))
    return HttpResponseRedirect(reverse(MyAccountSettingsView.urlname))
class BasePageView(TemplateView):
    """Base for HQ template views: builds a 'current_page' context entry
    (name, title, url, parents) shared by all pages."""
    urlname = None  # name of the view used in urls
    page_title = None  # what shows up in the <title>
    template_name = 'hqwebapp/base_page.html'
    @property
    def page_name(self):
        """
        This is what is visible to the user.
        page_title is what shows up in <title> tags.
        """
        return self.page_title
    @property
    def page_url(self):
        # subclasses must supply the canonical URL of this page
        raise NotImplementedError()
    @property
    def parent_pages(self):
        """
        Specify parent pages as a list of
        [{
            'title': <name>,
            'url': <url>,
        }]
        """
        return []
    @property
    def main_context(self):
        """
        The shared context for rendering this page.
        """
        return {
            'current_page': {
                'page_name': self.page_name,
                'title': self.page_title,
                'url': self.page_url,
                'parents': self.parent_pages,
            },
        }
    @property
    def page_context(self):
        """
        The Context for the settings page
        (per-page extras merged over main_context).
        """
        return {}
    def get_context_data(self, **kwargs):
        # page_context wins over main_context on key collisions
        context = super(BasePageView, self).get_context_data(**kwargs)
        context.update(self.main_context)
        context.update(self.page_context)
        return context
    def render_to_response(self, context, **response_kwargs):
        """
        Returns a response with a template rendered with the given context.
        """
        return render(self.request, self.template_name, context)
class BaseSectionPageView(BasePageView):
    """Page that lives inside a named navigation section; adds a 'section'
    entry (name and url) to the shared context."""
    section_name = ""
    template_name = "hqwebapp/base_section.html"
    @property
    def section_url(self):
        # subclasses must supply the section's landing URL
        raise NotImplementedError
    @property
    def main_context(self):
        context = super(BaseSectionPageView, self).main_context
        context.update({
            'section': {
                'page_name': self.section_name,
                'url': self.section_url,
            }
        })
        return context
class PaginatedItemException(Exception):
    """Raised by CRUD item handlers; its message is rendered into the
    'error' field of the JSON response shown in the paginated list UI."""
    pass
class CRUDPaginatedViewMixin(object):
"""
Mix this in with a TemplateView view object.
For usage tips, see the docs for UI Helpers > Paginated CRUD View.
"""
DEFAULT_LIMIT = 10
limit_text = ugettext_noop("items per page")
empty_notification = ugettext_noop("You have no items.")
loading_message = ugettext_noop("Loading...")
deleted_items_header = ugettext_noop("Deleted Items:")
new_items_header = ugettext_noop("New Items:")
def _safe_escape(self, expression, default):
try:
return expression()
except ValueError:
return default
@property
def parameters(self):
"""
Specify GET or POST from a request object.
"""
raise NotImplementedError("you need to implement get_param_source")
@property
@memoized
def page(self):
return self._safe_escape(
lambda: int(self.parameters.get('page', 1)),
1
)
@property
@memoized
def limit(self):
return self._safe_escape(
lambda: int(self.parameters.get('limit', self.DEFAULT_LIMIT)),
self.DEFAULT_LIMIT
)
@property
def total(self):
raise NotImplementedError("You must implement total.")
@property
def sort_by(self):
return self.parameters.GET.get('sortBy', 'abc')
@property
def skip(self):
return (self.page - 1) * self.limit
@property
def action(self):
action = self.parameters.get('action')
if action not in self.allowed_actions:
raise Http404()
return action
@property
def column_names(self):
raise NotImplementedError("you must return a list of column names")
@property
def pagination_context(self):
create_form = self.get_create_form()
return {
'pagination': {
'page': self.page,
'limit': self.limit,
'total': self.total,
'limit_options': range(self.DEFAULT_LIMIT, 51, self.DEFAULT_LIMIT),
'column_names': self.column_names,
'num_columns': len(self.column_names),
'text': {
'limit': self.limit_text,
'empty': self.empty_notification,
'loading': self.loading_message,
'deleted_items': self.deleted_items_header,
'new_items': self.new_items_header,
},
'create_item_form': self.get_create_form_response(create_form) if create_form else None,
}
}
@property
def allowed_actions(self):
return [
'create',
'update',
'delete',
'paginate',
'refresh',
]
@property
def paginate_crud_response(self):
"""
Return this in the post method of your view class.
"""
response = getattr(self, '%s_response' % self.action)
return HttpResponse(json.dumps(response, cls=LazyEncoder))
@property
def create_response(self):
create_form = self.get_create_form()
new_item = None
if create_form.is_valid():
new_item = self.get_create_item_data(create_form)
create_form = self.get_create_form(is_blank=True)
return {
'newItem': new_item,
'form': self.get_create_form_response(create_form)
}
@property
def update_response(self):
update_form = self.get_update_form()
updated_item = None
if update_form.is_valid():
updated_item = self.get_updated_item_data(update_form)
return {
'updatedItem': updated_item,
'form': self.get_update_form_response(update_form),
}
@property
def refresh_response(self):
try:
self.refresh_item(self.item_id)
except PaginatedItemException as e:
return {
'error': _("<strong>Problem Refreshing List:</strong> %s") % e,
}
return {
'success': True,
'currentPage': self.page,
'total': self.total,
'paginatedList': list(self.paginated_list),
}
@property
def delete_response(self):
try:
response = self.get_deleted_item_data(self.item_id)
return {
'deletedItem': response
}
except PaginatedItemException as e:
return {
'error': _("<strong>Problem Deleting:</strong> %s") % e,
}
@property
def item_id(self):
try:
return self.parameters['itemId']
except KeyError:
raise PaginatedItemException(_("The item's ID was not passed to the server."))
@property
def paginate_response(self):
return {
'success': True,
'currentPage': self.page,
'total': self.total,
'paginatedList': list(self.paginated_list),
}
@property
def paginated_list(self):
"""
This should return a list (or generator object) of data formatted as follows:
[
{
'itemData': {
'id': <id of item>,
<json dict of item data for the knockout model to use>
},
'template': <knockout template id>
}
]
"""
raise NotImplementedError("Return a list of data for the request response.")
def get_create_form(self, is_blank=False):
"""
This should be a crispy form that creates an item.
It's not required if you just want a paginated view.
"""
pass
def get_create_form_response(self, create_form):
    """Render the partial HTML for the create-item form."""
    template_context = {'form': create_form}
    return render_to_string('hqwebapp/partials/create_item_form.html', template_context)
def get_update_form(self, initial_data=None):
    """Subclasses must return a form object that will update an item.

    :param initial_data: optional dict used to pre-populate the form.
    """
    raise NotImplementedError("You must return a form object that will update an Item")
def get_update_form_response(self, update_form):
    """Render the partial HTML for the update-item form."""
    template_context = {'form': update_form}
    return render_to_string('hqwebapp/partials/update_item_form.html', template_context)
def refresh_item(self, item_id):
    """Process the item that triggered a list refresh here.

    Subclasses must override; raise PaginatedItemException to report a
    user-visible error.
    """
    raise NotImplementedError("You must implement refresh_item")
def get_create_item_data(self, create_form):
    """Subclasses must return a dict of data for the created item:

    {
        'itemData': {
            'id': <id of item>,
            <json dict of item data for the knockout model to use>
        },
        'template': <knockout template id>
    }
    """
    # Bug fix: the original message told implementers to override a method
    # named "get_new_item_data", which does not exist.
    raise NotImplementedError("You must implement get_create_item_data")
def get_updated_item_data(self, update_form):
    """Subclasses must return a dict of data for the updated item:

    {
        'itemData': {
            'id': <id of item>,
            <json dict of item data for the knockout model to use>
        },
        'template': <knockout template id>
    }
    """
    raise NotImplementedError("You must implement get_updated_item_data")
def get_deleted_item_data(self, item_id):
    """Subclasses must return a dict of data for the deleted item:

    {
        'itemData': {
            'id': <id of item>,
            <json dict of item data for the knockout model to use>
        },
        'template': <knockout template id>
    }
    """
    raise NotImplementedError("You must implement get_deleted_item_data")
@login_required
def quick_find(request):
    """Look up a couch document by id (main db first, then the Repeater db)
    and either redirect to it or return its doc-info as JSON."""
    query = request.GET.get('q')
    redirect = request.GET.get('redirect') != 'false'
    if not query:
        return HttpResponseBadRequest('GET param "q" must be provided')

    def handle_doc(doc):
        # Only superusers, or admins of the doc's domain, may see it.
        domain = doc.get('domain') or doc.get('domains', [None])[0]
        if request.couch_user.is_superuser or (domain and request.couch_user.is_domain_admin(domain)):
            doc_info = get_doc_info(doc, domain_hint=domain)
        else:
            raise Http404()
        if redirect and doc_info.link:
            messages.info(request, _("We've redirected you to the %s matching your query") % doc_info.type_display)
            return HttpResponseRedirect(doc_info.link)
        return json_response(doc_info)

    # Try each database lazily, in order, stopping at the first hit.
    for fetch_db in (get_db, Repeater.get_db):
        try:
            doc = fetch_db().get(query)
        except ResourceNotFound:
            continue
        return handle_doc(doc)
    raise Http404()
def osdd(request, template='osdd.xml'):
    """Serve the OpenSearch description document with an XML content type."""
    context = {'url_base': get_url_base()}
    response = render(request, template, context)
    response['Content-Type'] = 'application/xml'
    return response
| |
import random, math, copy
import numpy as np
class BinaryNode(object):
    """A node in an expression tree used for symbolic regression (GP).

    ``value`` is an operator symbol ('+', '-', '*', '/'), the variable 'x',
    or a numeric constant.  ``arity`` is the number of children a node may
    hold (2 for the binary operators, 0 for terminals).
    """

    def __init__(self, value=None, arity=2, children=None):
        self.value = value
        self.arity = arity
        # Bug fix: the original signature used a mutable default
        # (children=[]), so every default-constructed node shared ONE list
        # of children.  Passing an explicit list still works unchanged.
        self.children = children if children is not None else []

    def __repr__(self, level=0):
        # Tab-indented, one-node-per-line dump of the subtree.
        ret = "\t" * level + repr(self.value) + "\n"
        for child in self.children:
            ret += child.__repr__(level + 1)
        return ret

    def evaluate(self, x):
        """Recursively evaluate the subtree at point(s) ``x``.

        ``x`` may be a scalar or a numpy array (the arithmetic broadcasts).
        Division is "protected": divide-by-zero evaluates to 1.0.
        """
        if self.value == 'x':
            return x
        elif self.value == '+':
            return self.children[0].evaluate(x) + self.children[1].evaluate(x)
        elif self.value == '-':
            return self.children[0].evaluate(x) - self.children[1].evaluate(x)
        elif self.value == '*':
            return self.children[0].evaluate(x) * self.children[1].evaluate(x)
        elif self.value == '/':
            try:
                return self.children[0].evaluate(x) / self.children[1].evaluate(x)
            except ZeroDivisionError:
                return 1.0
        else:
            # Any other value is treated as a numeric constant.
            return float(self.value)

    def flatten(self):
        """Return node values in in-order (left, root, right) sequence.

        Only meaningful for full binary subtrees (operators have exactly
        two children here).
        """
        left = []
        parents = []
        bst = self
        parents.append(bst)

        def descend_left(bst):
            while bst.children:
                parents.append(bst.children[0])
                parents.append(bst.children[1])
                bst = bst.children[0]
        descend_left(bst)
        while parents:
            bst = parents.pop()
            left.append(bst.value)
            if bst.children:
                descend_left(bst.children[1])
        return left

    def writeFunc(self, array):
        """Append a fully parenthesized infix rendering of the subtree to
        ``array`` and return it."""
        if len(self.children) == 2:
            array.append('(')
            self.children[0].writeFunc(array)
            array.append(self.value)
            self.children[1].writeFunc(array)
            array.append(')')
        else:
            array.append(self.value)
        return array

    def addBinaryNode(self, binaryNode):
        """Attach ``binaryNode`` at the first node (depth-first) that still
        has a free child slot.  Returns True on success, False if the whole
        subtree is full."""
        if len(self.children) < self.arity:
            self.children.append(binaryNode)
            return True
        elif self.arity > 0:
            for child in self.children:
                if child.addBinaryNode(binaryNode):
                    return True
            return False
        # Terminals (arity 0) can never accept a child.  (The original fell
        # through and returned None here; False is equivalent for callers,
        # which only test truthiness.)
        return False

    def getSizeI(self):
        # NOTE(review): despite the name, this mixes a direct child count
        # with recursive getSize() calls; it appears unused -- confirm
        # before relying on it.
        counter = len(self.children)
        for child in self.children:
            counter += child.getSize()
        return counter

    def getSize(self):
        """Return the number of descendants (self excluded), via BFS."""
        results = []
        nodes = self.children
        while 1:
            newNodes = []
            if len(nodes) == 0:
                break
            for node in nodes:
                results.append(node.value)
                if len(node.children) > 0:
                    for child in node.children:
                        newNodes.append(child)
            nodes = newNodes
        return len(results)

    def writeFuncIt(self):
        """Return the node values in breadth-first order, self included."""
        results = []
        results.append(self.value)
        nodes = self.children
        while 1:
            newNodes = []
            if len(nodes) == 0:
                break
            for node in nodes:
                results.append(node.value)
                if len(node.children) > 0:
                    for child in node.children:
                        newNodes.append(child)
            nodes = newNodes
        return results

    def evalWriteFunc(self, array, listOperations):
        # NOTE(review): this method looks unfinished -- indexOp is never
        # advanced and nothing is returned; it appears to be dead code.
        # Preserved verbatim pending confirmation.
        indexOp = 0
        indexCh = 0
        sortedArray = []
        sortedArray.append(array.pop(indexOp))
        while indexCh < len(array):
            if [(array[indexOp]), 2] in listOperations:
                indexCh += 1
                sortedArray.insert(indexOp, array.pop(indexCh - 1))
                sortedArray.insert(indexOp + 2, array.pop(indexCh))

    def selectNode(self, counter):
        """Return the ``counter``-th node of the subtree (0 = self), using
        the same child-first ordering as getSize()."""
        if counter == 0:
            return self
        if len(self.children) >= counter:
            return self.children[counter - 1]
        else:
            counter -= len(self.children)
            for child in self.children:
                if child.getSize() >= counter:
                    return child.selectNode(counter)
                else:
                    counter -= child.getSize()

    def selectRandomNode(self):
        """Return a uniformly random descendant (self excluded), via BFS."""
        size = self.getSize()
        counter = random.randint(1, size)
        nodes = self.children
        while 1:
            newNodes = []
            for node in nodes:
                counter -= 1
                if counter == 0:
                    return node
                if len(node.children) > 0:
                    for child in node.children:
                        newNodes.append(child)
            nodes = newNodes

    def mutation(self, counter, listOperations, listTerminals):
        """Point-mutate the ``counter``-th node: operators get a random new
        operator symbol, terminals get a random new terminal (and lose any
        children)."""
        toBeMutated = self.selectNode(counter)
        if [toBeMutated.value, len(toBeMutated.children)] in listOperations:
            random_Operation = random.randrange(0, len(listOperations))
            print(listOperations[random_Operation][0])
            toBeMutated.value = listOperations[random_Operation][0]
        else:
            random_Terminal = random.randrange(0, len(listTerminals))
            toBeMutated.value = listTerminals[random_Terminal]
            toBeMutated.children = []

    def crossOver(self, counter, t2):
        """Replace the subtree at position ``counter`` with a random subtree
        taken from ``t2``."""
        if counter == 0:
            return
        elif len(self.children) >= counter:
            # NOTE(review): setting counter = 0 makes the next line index
            # children[-1], i.e. always the LAST child, regardless of which
            # slot was selected.  Possibly intended children[counter-1];
            # preserved as-is to keep the evolutionary behavior unchanged.
            counter = 0
            self.children[counter - 1] = t2.selectRandomNode()
        else:
            counter -= len(self.children)
            for child in self.children:
                if child.getSize() >= counter:
                    return child.crossOver(counter, t2)
                else:
                    counter -= child.getSize()
            return

    def calcFitness(self, x, func):
        """Sum of squared errors between this tree's output and ``func``."""
        return np.sum(np.power(self.evaluate(x) - func, 2))
#def generateRandomTree(listOperations, listTerminals):
# random_Operation = random.randrange(0,len(listOperations))
# newNode = BinaryNode(listOperations[random_Operation][0],listOperations[random_Operation][1], [])
# emptyTerminals = listOperations[random_Operation][1]
# while True:
# decisionMaking = random.randint(1, 100)
# if emptyTerminals == 0:
# break
# #The probability of adding a terminal needs to be bigger than the one to add a function.
# elif decisionMaking < 40:
# random_Operation = random.randrange(0,len(listOperations))
# if newNode.addBinaryNode(BinaryNode(listOperations[random_Operation][0], listOperations[random_Operation][1], [])):
# emptyTerminals += listOperations[random_Operation][1] - 1
# else:
# random_Terminal = random.randrange(0,len(listTerminals))
# if newNode.addBinaryNode(BinaryNode(listTerminals[random_Terminal], 0, [])):
# emptyTerminals -= 1
## print emptyTerminals
# return newNode
class Population(object):
    """A population of candidate expression trees for the GP run.

    ``self.array`` holds dicts of the form {'fitness': <float>, 'tree':
    <BinaryNode>}.  NOTE: appendRandomTree and reproduction read the
    module-level globals ``func``, ``lO`` and ``lT`` defined by the driver
    script below.
    """

    def __init__(self, array=None):
        # Bug fix: the original used a mutable default (array=[]), so every
        # default-constructed Population shared ONE individual list.
        self.array = array if array is not None else []

    def appendRandomTree(self, listOperations, listTerminals, x):
        """Grow one random tree (grow-style, ~60% terminal bias) and append
        it with its fitness against the global target ``func``."""
        random_Operation = random.randrange(0, len(listOperations))
        newNode = BinaryNode(listOperations[random_Operation][0], listOperations[random_Operation][1], [])
        emptyTerminals = listOperations[random_Operation][1]
        while True:
            decisionMaking = random.randint(1, 100)
            if emptyTerminals == 0:
                break
            # The probability of adding a terminal needs to be bigger than
            # the one to add a function, so trees stay finite.
            elif decisionMaking < 40:
                random_Operation = random.randrange(0, len(listOperations))
                if newNode.addBinaryNode(BinaryNode(listOperations[random_Operation][0], listOperations[random_Operation][1], [])):
                    emptyTerminals += listOperations[random_Operation][1] - 1
            else:
                random_Terminal = random.randrange(0, len(listTerminals))
                if newNode.addBinaryNode(BinaryNode(listTerminals[random_Terminal], 0, [])):
                    emptyTerminals -= 1
        self.array.append({'fitness': newNode.calcFitness(x, func), 'tree': newNode})

    def initPopulation(self, size, listOperations, listTerminals, x):
        """Fill the population with ``size`` random trees."""
        for i in range(0, size):
            self.appendRandomTree(listOperations, listTerminals, x)

    def sortArray(self):
        """Sort individuals in place, best (lowest) fitness first."""
        tmp = sorted(self.array, key=lambda ind: ind['fitness'])
        self.array = tmp[:]

    def reproduction(self, kept, cycles, x):
        """Evolve for ``cycles`` generations, keeping the ``kept`` best
        individuals each round and crossing them with random partners.

        NOTE(review): this method reads the module-level globals ``lO`` and
        ``lT`` (operator/terminal sets) rather than taking them as
        parameters -- preserved for compatibility with the driver script.
        """
        count = 0
        difference = 1000000000.0
        while (count < cycles):
            # Replace every individual but the best with a fresh random tree.
            for i in range(1, len(self.array)):
                self.array.pop(i)
                self.appendRandomTree(lO, lT, x)
            self.sortArray()
            for i in range(0, kept):
                # Drop the worst, archive a deep copy of the elite member...
                self.array.pop()
                newdict = copy.deepcopy(self.array[i])
                self.array.append(newdict)
                # ...then cross the elite member with a random partner.
                crossPartner = random.randint(i + 1, len(self.array) - 1)
                crossIndex = random.randint(2, self.array[i]['tree'].getSize())
                self.array[i]['tree'].crossOver(crossIndex, self.array[crossPartner]['tree'])
                self.array[i]['fitness'] = self.array[i]['tree'].calcFitness(x, func)
            self.sortArray()
            difference = self.array[0]['fitness']
            count += 1
            print(difference, count)
        return count
# ---------------------------------------------------------------------------
# Driver script: evolve expression trees to fit a cubic polynomial target.
# (Large blocks of commented-out exploratory code were removed.)
# ---------------------------------------------------------------------------
x_array = np.array([-1000.0, -500.0, -100.0, 0.0, 100.0, 500.0, 1000.0])
# Target values x^3 + x^2 + x + 1, computed directly instead of via eval()
# on a string -- identical result, no dynamic code execution.
func = x_array ** 3 + x_array ** 2 + x_array + 1
# Operator set [symbol, arity] and two alternative terminal sets.
lO = [['+', 2], ['*', 2], ['-', 2]]
lT = ['x', 1.0, 2.0]
lT2 = ['x', 3.0, 5.0]
population = Population()
population.initPopulation(50, lO, lT, x_array)
#population.sortArray()
size = 0
#print ("GENERATIONS: ", population.reproduction(10, 500, x_array))
print ("BEST INDIVIDUAL: ", population.array[0], "SIZE: ", population.array[0]['tree'].getSize(), "FITNESS: ", population.array[0]['tree'].calcFitness(x_array, func))
print ("FLATTEN: ", population.array[0]['tree'].flatten())
| |
from multiprocessing import set_start_method, cpu_count
# set_start_method('forkserver')
# import os
# os.environ["OMP_NUM_THREADS"] = str(cpu_count()) # or to whatever you want
from argparse import ArgumentParser
from datetime import datetime
time_now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
def str2bool(v):
    """Parse a human-friendly boolean string ('yes'/'no', 'true'/'false',
    't'/'f', 'y'/'n', '1'/'0', case-insensitive) for use as an argparse type.

    Raises argparse.ArgumentTypeError for anything else.
    """
    # Local import: the module only does `from argparse import ArgumentParser`,
    # so the original `argparse.ArgumentTypeError` raised a NameError instead
    # of the intended parse error.  (bug fix)
    from argparse import ArgumentTypeError
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise ArgumentTypeError('Boolean value expected.')
# Command-line interface.  Every option has a default so the script also runs
# with no arguments at all (e.g. from a notebook).
ap = ArgumentParser()
ap.add_argument('-ds', '--data_set', type=str, required=False, default='', help='The csv file containing the data with which to train.')
ap.add_argument('-rs', '--random_state', type=int, required=False, default=42, help='Integer value to initialize train/test splitting randomization')
ap.add_argument('-pp', '--pre_process', type=str2bool, nargs='?', required=False, default=True, help='Toggle whether to MinMax-preprocess the features.')
ap.add_argument('-pca', '--pca_transform', type=str2bool, nargs='?', required=False, default=True, help='Toggle whether to PCA-pretransform the features.')
ap.add_argument('-v', '--verbose', type=str2bool, nargs='?', required=False, default=False, help='Whether to set verbosity = True or False (default).')
# NOTE(review): the bare `except:` also swallows SystemExit (raised by
# --help or an unrecognized flag) and silently falls back to the defaults.
# Presumably intentional to allow interactive use -- confirm.
try:
    args = vars(ap.parse_args())
except:
    args = {}
    args['random_state'] = ap.get_default('random_state')
    args['pre_process'] = ap.get_default('pre_process')
    args['pca_transform'] = ap.get_default('pca_transform')
    args['verbose'] = ap.get_default('verbose')
    args['data_set'] = ap.get_default('data_set')
# Unpack the options into module-level flags used by the pipeline below.
# NOTE(review): do_pp / do_pca are not referenced later in this chunk.
do_pp = args['pre_process']
do_pca = args['pca_transform']
verbose = args['verbose']
data_set_fname = args['data_set']
print('BEGIN BIG COPY PASTE ')
import pandas as pd
import numpy as np
import pdb
import warnings
warnings.filterwarnings("ignore")
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, MinMaxScaler, minmax_scale
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor#, AdaBoostRegressor, GradientBoostingRegressor
from sklearn.decomposition import PCA, FastICA
from sklearn.externals import joblib
from sklearn.metrics import r2_score
import xgboost as xgb
from tqdm import tqdm
from glob import glob
from time import time
# Wall-clock reference for the whole run.
start0 = time()
def setup_features(dataRaw, label='flux', notFeatures=None, pipeline=None,
                   verbose=False, resample=False, returnAll=None):
    """Split raw calibration data into a feature matrix and a label vector.

    For production level usage: all scaling and transformations must be done
    with respect to the calibration data distributions.

    Args:
        dataRaw (pd.DataFrame | dict | str): input table, a dict of
            equal-length columns, or a path to a CSV file.
        label (str): name of the column to use as the regression label.
        notFeatures (list | None): column names to exclude from the features.
        pipeline: optional sklearn transformer applied via ``fit_transform``.
        verbose (bool): print shapes and timings.
        resample (bool): draw each column from N(value, matching error
            column) before use.
        returnAll: None -> (features, labels); True -> (features, labels,
            pipeline); 'features' -> features only; 'with raw data' ->
            (features, labels, dataRaw).

    Returns:
        See ``returnAll`` above.
    """
    # Bug fix: mutable default argument (was notFeatures=[]).
    if notFeatures is None:
        notFeatures = []
    if isinstance(dataRaw, str):
        # Bug fix: the original read from the undefined name `filename`.
        dataRaw = pd.read_csv(dataRaw)
    elif isinstance(dataRaw, dict):
        dataRaw = pd.DataFrame(dataRaw)
    elif not isinstance(dataRaw, pd.DataFrame):
        raise TypeError('The input must be a `pandas.DataFrame` or a `dict` with Equal Size Entries (to convert to df here)')

    inputData = dataRaw.copy()

    # Normalize the PLD pixel columns so each row of pixel values sums to 1.
    # NOTE(review): inputData is rebuilt from dataRaw below, so this
    # normalization never reaches the returned features -- preserved
    # verbatim pending confirmation of the intended behavior.
    pixCols = [colname for colname in inputData.columns
               if 'pix' in colname.lower() or 'pld' in colname.lower()]
    PLDnorm = np.sum(np.array(inputData[pixCols]), axis=1)
    inputData[pixCols] = (np.array(inputData[pixCols]).T / PLDnorm).T

    # Count the PLD pixel columns (used by the commented resampling config).
    # Bug fix: the original comprehension tested `colname` -- a name leaked
    # from an enclosing scope -- instead of the loop variable `key`.
    n_PLD = len([key for key in dataRaw.keys()
                 if 'err' not in key.lower() and ('pix' in key.lower() or 'pld' in key.lower())])
    input_labels = [colname for colname in dataRaw.columns
                    if colname not in notFeatures and 'err' not in colname.lower()]
    errors_labels = [colname for colname in dataRaw.columns
                     if colname not in notFeatures and 'err' in colname.lower()]

    start = time()
    if resample:
        # Perturb every column by a Gaussian draw scaled by its error column.
        print("Resampling ", end=" ")
        inputData = pd.DataFrame({colname: np.random.normal(dataRaw[colname], dataRaw[colerr])
                                  for colname, colerr in tqdm(zip(input_labels, errors_labels),
                                                              total=len(input_labels))})
        print("took {} seconds".format(time() - start))
    else:
        inputData = pd.DataFrame({colname: dataRaw[colname] for colname in input_labels})

    labels = dataRaw[label].values
    # explicitly remove the label column from the feature set
    if label in inputData.columns:
        inputData.drop(label, axis=1, inplace=True)

    feature_columns = [colname for colname in inputData.columns if colname not in notFeatures]
    features = inputData[feature_columns].values
    if verbose:
        print('Shape of Features Array is', features.shape)
        start = time()

    features_trnsfrmd = pipeline.fit_transform(features) if pipeline is not None else features

    if verbose:
        print('took {} seconds'.format(time() - start))

    collection = features_trnsfrmd, labels
    if returnAll == True:
        collection = features_trnsfrmd, labels, pipeline
    if returnAll == 'features':
        collection = features_trnsfrmd
    if returnAll == 'with raw data':
        # Bug fix: the original called .append() on a tuple (AttributeError).
        collection = features_trnsfrmd, labels, dataRaw
    return collection
# ## Load CSVs data
# Flux-like columns to convert to relative units (divided by median raw flux).
tobe_flux_normalized = ['fluxerr', 'bg_flux', 'sigma_bg_flux', 'flux']
# Columns that must never enter the feature matrix (label, errors, housekeeping).
spitzerCalNotFeatures = ['flux', 'fluxerr', 'dn_peak', 'xycov', 't_cernox', 'xerr', 'yerr', 'sigma_bg_flux']
# Use the default calibration CSV unless one was given on the command line.
spitzerCalFilename = 'pmap_ch2_0p1s_x4_rmulti_s3_7.csv' if data_set_fname == '' else data_set_fname
spitzerCalRawData = pd.read_csv(spitzerCalFilename)
# NOTE(review): the median of 'flux' is recomputed on every iteration, so this
# is only correct because 'flux' is the LAST entry of tobe_flux_normalized;
# reordering that list would change the results -- confirm this is intended.
for key in tobe_flux_normalized:
    spitzerCalRawData[key] = spitzerCalRawData[key] / np.median(spitzerCalRawData['flux'].values)
# Per-sample time uncertainty: half the median cadence.
spitzerCalRawData['bmjd_err'] = np.median(0.5*np.diff(spitzerCalRawData['bmjd']))
# NOTE(review): np_err is sqrt(yerr) -- presumably a Poisson-style estimate,
# but deriving it from the y-position error looks suspicious; confirm.
spitzerCalRawData['np_err'] = np.sqrt(spitzerCalRawData['yerr'])
# Give every PLD pixel column a matching *_err column scaled by the flux error.
for colname in spitzerCalRawData.columns:
    if 'err' not in colname.lower() and ('pix' in colname.lower() or 'pld' in colname.lower()):
        spitzerCalRawData[colname+'_err'] = spitzerCalRawData[colname] * spitzerCalRawData['fluxerr']
start = time()
print("Transforming Data ", end=" ")
# With no operations configured, setup_features receives pipeline=None and
# returns the raw (untransformed) features.
operations = []
# header = 'GBR' if do_gbr else 'RFI' if do_rfi else 'STD'
pipe = Pipeline(operations) if len(operations) else None
# Exclude only those columns that actually exist in this data set.
not_features_now = []
for feat_name in spitzerCalNotFeatures:
    if feat_name in spitzerCalRawData.columns:
        not_features_now.append(feat_name)
features, labels, pipe_fitted = setup_features( dataRaw = spitzerCalRawData,
                                                pipeline = pipe,
                                                verbose = verbose,
                                                notFeatures = not_features_now,
                                                resample = False,
                                                returnAll = True)
n_samples = len(features)
print("Establishing the Pipeline Components.")
# Standalone transformers fit along separate paths: raw -> std -> PCA -> minmax.
std_scaler_from_raw = StandardScaler()
pca_transformer_from_std_scaled = PCA()
minmax_scaler_transformer_raw = MinMaxScaler()
minmax_scaler_transformer_pca = MinMaxScaler()
operations = []
operations.append(('std_scaler', StandardScaler()))
operations.append(('pca_transform', PCA()))
operations.append(('minmax_scaler', MinMaxScaler()))
print("Establishing the Full Pipeline.")
full_pipe = Pipeline(operations)
print("Transforming Raw Data in Multiple Paths.")
# One-shot pipeline vs the equivalent chain of individual transformers.
full_pipe_transformed_features = full_pipe.fit_transform(features)
standard_scaled_features = std_scaler_from_raw.fit_transform(features)
pca_standard_scaled_features = pca_transformer_from_std_scaled.fit_transform(standard_scaled_features)
minmax_scaled_features_raw = minmax_scaler_transformer_raw.fit_transform(features)
minmax_scaled_features_pca = minmax_scaler_transformer_pca.fit_transform(pca_standard_scaled_features)
print("Storing pipelines on disk vis joblib.")
joblib.dump(full_pipe, 'pmap_full_pipe_transformer_16features.joblib.save')
joblib.dump(std_scaler_from_raw, 'pmap_standard_scaler_transformer_16features.joblib.save')
joblib.dump(pca_transformer_from_std_scaled, 'pmap_pca_transformer_from_stdscaler_16features.joblib.save')
joblib.dump(minmax_scaler_transformer_raw, 'pmap_minmax_scaler_transformer_from_raw_16features.joblib.save')
joblib.dump(minmax_scaler_transformer_pca, 'pmap_minmax_scaler_transformer_from_pca_16features.joblib.save')
print("Saving raw and transformed features to DataFrame csv.")
# Label uncertainties in the same relative units as the labels.
label_errs = spitzerCalRawData['fluxerr'].values / np.median(spitzerCalRawData['flux'])
# Output flux labels
flux_pmap_save_name = 'pmap_raw_labels_and_errors.csv'
flux_dict = dict(Flux=labels, Flux_err=label_errs)
flux_df = pd.DataFrame(flux_dict, index=range(n_samples))
flux_df.to_csv(flux_pmap_save_name, index=True, index_label='idx')
# Persist every representation of the features for downstream experiments.
pd.DataFrame(features, index=range(n_samples)).to_csv('pmap_raw_16features.csv', index=True, index_label='idx')
pd.DataFrame(full_pipe_transformed_features, index=range(n_samples)).to_csv('pmap_full_pipe_transformed_16features.csv', index=True, index_label='idx')
pd.DataFrame(standard_scaled_features, index=range(n_samples)).to_csv('pmap_standard_scaled_16features.csv', index=True, index_label='idx')
pd.DataFrame(pca_standard_scaled_features, index=range(n_samples)).to_csv('pmap_pca_transformed_from_stdscaler_16features.csv', index=True, index_label='idx')
pd.DataFrame(minmax_scaled_features_raw, index=range(n_samples)).to_csv('pmap_minmax_transformed_from_raw_16features.csv', index=True, index_label='idx')
pd.DataFrame(minmax_scaled_features_pca, index=range(n_samples)).to_csv('pmap_minmax_transformed_from_pca_16features.csv', index=True, index_label='idx')
| |
import collections
from flask import url_for, request, Markup
from .utils import freeze_dict, join_html_attrs
class Item(object):
    """The navigation item object.

    :param label: the display label of this navigation item.
    :param endpoint: the unique name of this navigation item.
                     If this item points to an internal url, this parameter
                     should be acceptable for ``url_for`` which will generate
                     the target url.
    :param args: optional. If this parameter is provided, it will be passed
                 to ``url_for`` together with ``endpoint``.
                 If the arguments need to be decided inside the Flask app
                 context, this parameter can be a function to delay the
                 execution.
    :param url: optional. If this parameter is provided, it becomes the
                target url of this item; ``endpoint`` and ``args`` will
                not be used to generate the url.
    :param html_attrs: optional. This :class:`dict` will be used for
                       representing html.

    The ``endpoint`` is the identity name of this navigation item. It will be
    unique in the whole application. In most situations, it should be an
    endpoint name of a Flask view function.
    """

    def __init__(self, label, endpoint, args=None, url=None, html_attrs=None,
                 items=None):
        self.label = label
        self.endpoint = endpoint
        self._args = args
        self._url = url
        self.html_attrs = {} if html_attrs is None else html_attrs
        # NOTE(review): `items or None` collapses any empty iterable to
        # None; preserved as-is since ItemCollection treats both the same.
        self.items = ItemCollection(items or None)

    def __html__(self):
        """Render the item as an ``<a>`` element (Markup-safe)."""
        attrs = dict(self.html_attrs)
        # Bug fix: work on a COPY of the class list.  The original appended
        # 'active' directly to the caller-supplied list inside html_attrs,
        # so 'active' accumulated there on every render of an active item.
        html_class = list(attrs.get('class', []))
        if self.is_active:
            html_class.append('active')
        # joins class list; drop the attribute entirely when empty
        attrs['class'] = ' '.join(html_class)
        if not attrs['class']:
            del attrs['class']
        attrs['href'] = self.url
        attrs_template, attrs_values = join_html_attrs(attrs)
        return Markup('<a %s>{label}</a>' % attrs_template).format(
            *attrs_values, label=self.label)

    def __html_format__(self, format_spec):
        """Support ``'{:li}'.format(item)`` to render as a list element."""
        if format_spec == 'li':
            li_attrs = Markup(' class="active"') if self.is_active else ''
            return Markup('<li{0}>{1}</li>').format(li_attrs, self.__html__())
        elif format_spec:
            raise ValueError('Invalid format spec')
        return self.__html__()

    @property
    def args(self):
        """The arguments which will be passed to ``url_for``.

        :type: :class:`dict`
        """
        if self._args is None:
            return {}
        if callable(self._args):
            # delayed arguments: evaluate inside the current app context
            return dict(self._args())
        return dict(self._args)

    @property
    def url(self):
        """The final url of this navigation item.

        By default, the value is generated from :attr:`self.endpoint` and
        :attr:`self.args`.

        .. note::

           The :attr:`url` property requires the app context unless the
           config value :const:`SERVER_NAME` is provided, because of
           :func:`flask.url_for`.

        :type: :class:`str`
        """
        if self.is_internal:
            return url_for(self.endpoint, **self.args)
        return self._url

    @property
    def is_active(self):
        """``True`` if the item should be presented as active, and ``False``
        always if the request context is not bound.
        """
        return bool(request and self.is_current)

    @property
    def is_internal(self):
        """``True`` if the target url is internal to the current app."""
        return self._url is None

    @property
    def is_current(self):
        """``True`` if the current request has the same endpoint as the item.

        The property should be used in a bound request context, or a
        :class:`RuntimeError` may be raised.
        """
        if not self.is_internal:
            return False  # always false for external url
        has_same_endpoint = (request.endpoint == self.endpoint)
        has_same_args = (request.view_args == self.args)
        return has_same_endpoint and has_same_args  # matches the endpoint

    @property
    def ident(self):
        """The identity of this item.

        :type: :class:`~flask.ext.navigation.Navigation.ItemReference`
        """
        return ItemReference(self.endpoint, self.args)
class ItemCollection(collections.abc.MutableSequence):
    # Bug fix: ``collections.MutableSequence`` / ``collections.Iterable``
    # were deprecated aliases removed in Python 3.10; the supported home is
    # ``collections.abc``.  MutableSequence already implies Iterable, so
    # listing Iterable separately was redundant.
    """The collection of navigation items.

    This collection is a mutable sequence. All items have an order index, and
    can be found by endpoint name. e.g.::

        c = ItemCollection()
        c.append(Item(endpoint='doge'))

        print(c['doge'])  # output: Item(endpoint='doge')
        print(c[0])       # output: Item(endpoint='doge')
        print(c)          # output: ItemCollection([Item(endpoint='doge')])
        print(len(c))     # output: 1

        c.append(Item(endpoint='lumpy', args={'num': 4}))

        print(c[1])       # output: Item(endpoint='lumpy', args={'num': 4})
        assert c['lumpy', {'num': 4}] is c[1]
    """

    def __init__(self, iterable=None):
        #: the item collection
        self._items = []
        #: the mapping collection of endpoint -> item
        self._items_mapping = {}
        #: initial extending
        self.extend(iterable or [])

    def __repr__(self):
        return 'ItemCollection(%r)' % self._items

    def __getitem__(self, index):
        # int -> positional access; (endpoint, args) or endpoint -> lookup
        # through the identity mapping.
        if isinstance(index, int):
            return self._items[index]
        if isinstance(index, tuple):
            endpoint, args = index
        else:
            endpoint, args = index, {}
        ident = ItemReference(endpoint, args)
        return self._items_mapping[ident]

    def __setitem__(self, index, item):
        # remove the old reference
        old_item = self._items[index]
        del self._items_mapping[old_item.ident]

        self._items[index] = item
        self._items_mapping[item.ident] = item

    def __delitem__(self, index):
        item = self[index]
        del self._items[index]
        del self._items_mapping[item.ident]

    def __len__(self):
        return len(self._items)

    def __iter__(self):
        return iter(self._items)

    def insert(self, index, item):
        self._items.insert(index, item)
        self._items_mapping[item.ident] = item
class ItemReference(collections.namedtuple('ItemReference', 'endpoint args')):
    """The identity tuple of a navigation item.

    :param endpoint: the endpoint of view function.
    :type endpoint: ``str``
    :param args: the arguments of view function.
    :type args: ``dict``
    """

    def __new__(cls, endpoint, args=()):
        # dict args are unhashable; freeze them into a canonical tuple form
        if isinstance(args, dict):
            args = freeze_dict(args)
        # Bug fix: the original called ``super(cls, ItemReference)``, i.e.
        # with the (type, subtype) arguments swapped; it only worked by
        # accident while nothing subclassed ItemReference.
        return super(ItemReference, cls).__new__(cls, endpoint, args)
| |
import os
import subprocess
import re
import sys
if os.name == 'nt':
from ctypes import windll, create_unicode_buffer
try:
# Allow using this file on the website where the sublime
# module is unavailable
import sublime
except (ImportError):
sublime = None
from .console_write import console_write
from .unicode import unicode_from_os
from .show_error import show_error
from . import text
try:
# Python 2
str_cls = unicode
except (NameError):
# Python 3
str_cls = str
def create_cmd(args, basename_binary=False):
    """
    Takes an array of strings to be passed to subprocess.Popen and creates
    a string that can be pasted into a terminal

    :param args:
        The array containing the binary name/path and all arguments

    :param basename_binary:
        If only the basename of the binary should be displayed instead of the full path

    :return:
        The command string
    """

    if basename_binary:
        # BUG FIX: work on a copy so the caller's list is not mutated;
        # the original rewrote args[0] in place.
        args = args[:]
        args[0] = os.path.basename(args[0])

    if os.name == 'nt':
        return subprocess.list2cmdline(args)
    else:
        escaped_args = []
        for arg in args:
            # Quote any argument containing shell-unsafe characters,
            # escaping embedded single quotes for POSIX shells
            if re.search('^[a-zA-Z0-9/_^\\-\\.:=]+$', arg) is None:
                arg = u"'" + arg.replace(u"'", u"'\\''") + u"'"
            escaped_args.append(arg)
        return u' '.join(escaped_args)
class Cli(object):

    """
    Base class for running command line apps

    :param binary_locations:
        The full filesystem path to the executable for the version control
        system. May be set to None to allow the code to try and find it. May
        also be a list of locations to attempt. This allows settings to be
        shared across operating systems.
    """

    # Prevent duplicate lookups - maps a cli_name to the binary path that
    # was discovered for it, shared across all instances
    binary_paths = {}

    # Subclasses set this to the tool name (e.g. "git", "hg"); used for the
    # cache key and in error messages
    cli_name = None

    def __init__(self, binary_locations, debug):
        self.binary_locations = binary_locations
        self.debug = debug

    def execute(self, args, cwd, input=None, encoding='utf-8', meaningful_output=False, ignore_errors=None):
        """
        Creates a subprocess with the executable/args

        :param args:
            A list of the executable path and all arguments to it

        :param cwd:
            The directory in which to run the executable

        :param input:
            The input text to send to the program

        :param encoding:
            The encoding used to encode input and decode output

        :param meaningful_output:
            If the output from the command is possibly meaningful and should
            be displayed if in debug mode

        :param ignore_errors:
            A regex of errors to ignore

        :return: A string of the executable output, or False on error
        """

        startupinfo = None
        if os.name == 'nt':
            # Suppress the console window that would otherwise flash up
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            # Make sure the cwd is ascii
            try:
                cwd.encode('mbcs')
            except UnicodeEncodeError:
                buf = create_unicode_buffer(512)
                if windll.kernel32.GetShortPathNameW(cwd, buf, len(buf)):
                    cwd = buf.value

        if self.debug:
            console_write(
                u'''
                Executing %s [%s]
                ''',
                (create_cmd(args), cwd)
            )

        try:
            if sys.platform == 'win32' and sys.version_info < (3,):
                cwd = cwd.encode('mbcs')
            proc = subprocess.Popen(args, stdin=subprocess.PIPE,
                stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                startupinfo=startupinfo, cwd=cwd, env=os.environ)

            if input and isinstance(input, str_cls):
                input = input.encode(encoding)

            # Flipped to False once communicate() returns; read by kill_proc
            # to decide whether the process appears hung
            stuck = True

            if sublime:
                def kill_proc():
                    if not stuck:
                        return
                    # This doesn't actually work!
                    proc.kill()
                    binary_name = os.path.basename(args[0])
                    # BUG FIX: is_vcs was previously only assigned inside the
                    # git/hg branches, raising a NameError below for any
                    # other binary name
                    is_vcs = False
                    if re.search('git', binary_name):
                        is_vcs = True
                    elif re.search('hg', binary_name):
                        is_vcs = True
                    message = u'The process %s seems to have gotten stuck.' % binary_name
                    if is_vcs:
                        message += text.format(
                            u'''
                            This is likely due to a password or passphrase
                            prompt. Please ensure %s works without a prompt, or
                            change the "ignore_vcs_packages" Package Control
                            setting to true.
                            Sublime Text will need to be restarted once these
                            changes are made.
                            ''',
                            binary_name
                        )
                    show_error(message)
                # Warn the user if the process has not finished in a minute
                sublime.set_timeout(kill_proc, 60000)

            output, _ = proc.communicate(input)
            stuck = False
            output = output.decode(encoding)
            output = output.replace('\r\n', '\n').rstrip(' \n\r')

            if proc.returncode != 0:
                if not ignore_errors or re.search(ignore_errors, output) is None:
                    show_error(
                        u'''
                        Error executing: %s
                        %s
                        VCS-based packages can be ignored with the
                        "ignore_vcs_packages" setting.
                        ''',
                        (create_cmd(args), output)
                    )
                    return False

            if meaningful_output and self.debug and len(output) > 0:
                console_write(output, indent='  ', prefix=False)

            return output

        except (OSError) as e:
            show_error(
                u'''
                Error executing: %s
                %s
                Try checking your "%s_binary" setting?
                ''',
                (create_cmd(args), unicode_from_os(e), self.cli_name)
            )
            return False

    def find_binary(self, name):
        """
        Locates the executable by looking in the PATH and well-known directories

        :param name:
            The string filename of the executable

        :return:
            The filesystem path to the executable, or None if not found
        """

        # Use the cached path
        if self.cli_name in Cli.binary_paths:
            return Cli.binary_paths[self.cli_name]

        check_binaries = []

        # Use the settings first
        if self.binary_locations:
            if not isinstance(self.binary_locations, list):
                self.binary_locations = [self.binary_locations]
            check_binaries.extend(self.binary_locations)

        # Next check the PATH
        for dir_ in os.environ['PATH'].split(os.pathsep):
            check_binaries.append(os.path.join(dir_, name))

        # Finally look in common locations that may not be in the PATH
        if os.name == 'nt':
            dirs = ['C:\\Program Files\\Git\\bin',
                'C:\\Program Files (x86)\\Git\\bin',
                'C:\\Program Files\\TortoiseGit\\bin',
                'C:\\Program Files\\Mercurial',
                'C:\\Program Files (x86)\\Mercurial',
                'C:\\Program Files (x86)\\TortoiseHg',
                'C:\\Program Files\\TortoiseHg',
                'C:\\cygwin\\bin']
        else:
            # ST seems to launch with a minimal set of environmental variables
            # on OS X, so we add some common paths for it
            dirs = ['/usr/local/git/bin', '/usr/local/bin']
        for dir_ in dirs:
            check_binaries.append(os.path.join(dir_, name))

        if self.debug:
            console_write(
                u'''
                Looking for %s at: "%s"
                ''',
                (self.cli_name, '", "'.join(check_binaries))
            )

        for path in check_binaries:
            # Must exist, be a file (not a directory) and be executable
            if os.path.exists(path) and not os.path.isdir(path) and os.access(path, os.X_OK):
                if self.debug:
                    console_write(
                        u'''
                        Found %s at "%s"
                        ''',
                        (self.cli_name, path)
                    )
                Cli.binary_paths[self.cli_name] = path
                return path

        if self.debug:
            console_write(
                u'''
                Could not find %s on your machine
                ''',
                self.cli_name
            )

        return None
| |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import atexit
import functools
import json
import os
import re
import sys
import time
import uuid
import fixtures
from oslo_log import log as logging
from oslo_utils import importutils
import six
from six.moves.urllib import parse
import testscenarios
import testtools
from neutron.tests.api import clients
from neutron.tests.tempest.common import credentials
import neutron.tests.tempest.common.generator.valid_generator as valid
from neutron.tests.tempest import config
from neutron.tests.tempest import exceptions
LOG = logging.getLogger(__name__)
CONF = config.CONF
def attr(*args, **kwargs):
    """A decorator which applies the testtools attr decorator

    This decorator applies the testtools.testcase.attr if it is in the list of
    attributes to testtools we want to apply.
    """
    def decorator(f):
        # missing 'type' yields None, which fails both isinstance checks
        test_type = kwargs.get('type')
        if isinstance(test_type, str):
            f = testtools.testcase.attr(test_type)(f)
            if test_type == 'smoke':
                # smoke tests are implicitly gate tests as well
                f = testtools.testcase.attr('gate')(f)
        elif isinstance(test_type, list):
            for tag in test_type:
                f = testtools.testcase.attr(tag)(f)
                if tag == 'smoke':
                    f = testtools.testcase.attr('gate')(f)
        return f
    return decorator
def idempotent_id(id):
    """Stub for metadata decorator"""
    # reject non-string ids up front
    if not isinstance(id, six.string_types):
        raise TypeError('Test idempotent_id must be string not %s'
                        '' % type(id).__name__)
    # raises ValueError if the id is not a well-formed UUID
    uuid.UUID(id)

    def decorator(f):
        tagged = testtools.testcase.attr('id-%s' % id)(f)
        # prepend the id to any existing docstring
        if tagged.__doc__:
            tagged.__doc__ = 'Test idempotent id: %s\n%s' % (id, tagged.__doc__)
        else:
            tagged.__doc__ = 'Test idempotent id: %s' % id
        return tagged
    return decorator
def get_service_list():
    """Return a mapping of service name to its configured availability."""
    available = CONF.service_available
    service_list = {
        'compute': available.nova,
        'image': available.glance,
        'baremetal': available.ironic,
        'volume': available.cinder,
        'orchestration': available.heat,
        # NOTE(mtreinish) nova-network will provide networking functionality
        # if neutron isn't available, so always set to True.
        'network': True,
        'identity': True,
        'object_storage': available.swift,
        'dashboard': available.horizon,
        'telemetry': available.ceilometer,
        'data_processing': available.sahara,
        'database': available.trove
    }
    return service_list
def services(*args, **kwargs):
    """A decorator used to set an attr for each service used in a test case

    This decorator applies a testtools attr for each service that gets
    exercised by a test case.
    """
    def decorator(f):
        # the full set of recognized service tags
        known = ('compute', 'image', 'baremetal', 'volume', 'orchestration',
                 'network', 'identity', 'object_storage', 'dashboard',
                 'telemetry', 'data_processing', 'database')
        for service in args:
            if service not in known:
                raise exceptions.InvalidServiceTag('%s is not a valid '
                                                   'service' % service)
        # tag the test with every requested service
        attr(type=list(args))(f)

        @functools.wraps(f)
        def wrapper(self, *func_args, **func_kwargs):
            # skip at run time if any required service is unavailable
            availability = get_service_list()
            for service in args:
                if not availability[service]:
                    msg = 'Skipped because the %s service is not available' % (
                        service)
                    raise testtools.TestCase.skipException(msg)
            return f(self, *func_args, **func_kwargs)
        return wrapper
    return decorator
def stresstest(*args, **kwargs):
    """Add stress test decorator

    For all functions with this decorator a attr stress will be
    set automatically.

    @param class_setup_per: allowed values are application, process, action
           ``application``: once in the stress job lifetime
           ``process``: once in the worker process lifetime
           ``action``: on each action
    @param allow_inheritance: allows inheritance of this attribute
    """
    def decorator(f):
        # fall back to per-process setup / no inheritance when unspecified
        f.st_class_setup_per = kwargs.get('class_setup_per', 'process')
        f.st_allow_inheritance = kwargs.get('allow_inheritance', False)
        attr(type='stress')(f)
        return f
    return decorator
def requires_ext(*args, **kwargs):
    """A decorator to skip tests if an extension is not enabled

    @param extension
    @param service
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*func_args, **func_kwargs):
            # consult the configured extension list at call time
            enabled = is_extension_enabled(kwargs['extension'],
                                           kwargs['service'])
            if not enabled:
                msg = "Skipped because %s extension: %s is not enabled" % (
                    kwargs['service'], kwargs['extension'])
                raise testtools.TestCase.skipException(msg)
            return func(*func_args, **func_kwargs)
        return wrapper
    return decorator
def is_extension_enabled(extension_name, service):
    """A function that will check the list of enabled extensions from config
    """
    config_dict = {
        'compute': CONF.compute_feature_enabled.api_extensions,
        'volume': CONF.volume_feature_enabled.api_extensions,
        'network': CONF.network_feature_enabled.api_extensions,
        'object': CONF.object_storage_feature_enabled.discoverable_apis,
    }
    enabled_extensions = config_dict[service]
    # empty list means nothing is enabled
    if not enabled_extensions:
        return False
    # a leading 'all' enables everything; otherwise require an exact match
    return enabled_extensions[0] == 'all' or extension_name in enabled_extensions
# Test classes that entered setUp but have not yet run the base class
# tearDownClass; BaseTestCase.setUp adds to this set and
# BaseTestCase.tearDownClass discards from it.
at_exit_set = set()
def validate_tearDownClass():
    # Anything still registered at interpreter exit skipped the super call
    # in its tearDownClass override.
    if at_exit_set:
        LOG.error(
            "tearDownClass does not call the super's "
            "tearDownClass in these classes: \n"
            + str(at_exit_set))
# Run the check when the test process exits.
atexit.register(validate_tearDownClass)
class BaseTestCase(testtools.testcase.WithAttributes,
                   testtools.TestCase):
    """The test base class defines Tempest framework for class level fixtures.

    `setUpClass` and `tearDownClass` are defined here and cannot be overwritten
    by subclasses (enforced via hacking rule T105).

    Set-up is split in a series of steps (setup stages), which can be
    overwritten by test classes. Set-up stages are:
    - skip_checks
    - setup_credentials
    - setup_clients
    - resource_setup

    Tear-down is also split in a series of steps (teardown stages), which are
    stacked for execution only if the corresponding setup stage had been
    reached during the setup phase. Tear-down stages are:
    - clear_isolated_creds (defined in the base test class)
    - resource_cleanup
    """

    # Set to True by setUpClass; checked in setUp to detect subclasses that
    # override setUpClass without calling super
    setUpClassCalled = False
    _service = None
    # Requested network resources; populated via set_network_resources()
    network_resources = {}

    # NOTE(sdague): log_format is defined inline here instead of using the oslo
    # default because going through the config path recouples config to the
    # stress tests too early, and depending on testr order will fail unit tests
    log_format = ('%(asctime)s %(process)d %(levelname)-8s '
                  '[%(name)s] %(message)s')

    @classmethod
    def setUpClass(cls):
        # It should never be overridden by descendants
        if hasattr(super(BaseTestCase, cls), 'setUpClass'):
            super(BaseTestCase, cls).setUpClass()
        cls.setUpClassCalled = True
        # Stack of (name, callable) to be invoked in reverse order at teardown
        cls.teardowns = []
        # All the configuration checks that may generate a skip
        cls.skip_checks()
        try:
            # Allocation of all required credentials and client managers
            cls.teardowns.append(('credentials', cls.clear_isolated_creds))
            cls.setup_credentials()
            # Shortcuts to clients
            cls.setup_clients()
            # Additional class-wide test resources
            cls.teardowns.append(('resources', cls.resource_cleanup))
            cls.resource_setup()
        except Exception:
            etype, value, trace = sys.exc_info()
            LOG.info("%s raised in %s.setUpClass. Invoking tearDownClass." % (
                     etype, cls.__name__))
            cls.tearDownClass()
            try:
                # FIX: six.reraise replaces the Python 2-only
                # ``raise etype, value, trace`` syntax, so this module also
                # parses under Python 3 (six is already imported above).
                six.reraise(etype, value, trace)
            finally:
                del trace  # to avoid circular refs

    @classmethod
    def tearDownClass(cls):
        at_exit_set.discard(cls)
        # It should never be overridden by descendants
        if hasattr(super(BaseTestCase, cls), 'tearDownClass'):
            super(BaseTestCase, cls).tearDownClass()
        # Save any existing exception, we always want to re-raise the original
        # exception only
        etype, value, trace = sys.exc_info()
        # If there was no exception during setup we shall re-raise the first
        # exception in teardown
        re_raise = (etype is None)
        while cls.teardowns:
            name, teardown = cls.teardowns.pop()
            # Catch any exception in tearDown so we can re-raise the original
            # exception at the end
            try:
                teardown()
            except Exception as te:
                sys_exec_info = sys.exc_info()
                tetype = sys_exec_info[0]
                # TODO(andreaf): Till we have the ability to cleanup only
                # resources that were successfully setup in resource_cleanup,
                # log AttributeError as info instead of exception.
                if tetype is AttributeError and name == 'resources':
                    LOG.info("tearDownClass of %s failed: %s" % (name, te))
                else:
                    LOG.exception("teardown of %s failed: %s" % (name, te))
                if not etype:
                    etype, value, trace = sys_exec_info
        # If exceptions were raised during teardown, and not before, re-raise
        # the first one
        if re_raise and etype is not None:
            try:
                # FIX: same Python 3-compatible re-raise as in setUpClass
                six.reraise(etype, value, trace)
            finally:
                del trace  # to avoid circular refs

    @classmethod
    def skip_checks(cls):
        """Class level skip checks. Subclasses verify in here all
        conditions that might prevent the execution of the entire test class.
        Checks implemented here may not make use API calls, and should rely on
        configuration alone.
        In general skip checks that require an API call are discouraged.
        If one is really needed it may be implemented either in the
        resource_setup or at test level.
        """
        pass

    @classmethod
    def setup_credentials(cls):
        """Allocate credentials and the client managers from them."""
        # TODO(andreaf) There is a fair amount of code that could me moved from
        # base / test classes in here. Ideally tests should be able to only
        # specify a list of (additional) credentials the need to use.
        pass

    @classmethod
    def setup_clients(cls):
        """Create links to the clients into the test object."""
        # TODO(andreaf) There is a fair amount of code that could me moved from
        # base / test classes in here. Ideally tests should be able to only
        # specify which client is `client` and nothing else.
        pass

    @classmethod
    def resource_setup(cls):
        """Class level resource setup for test cases.
        """
        pass

    @classmethod
    def resource_cleanup(cls):
        """Class level resource cleanup for test cases.
        Resource cleanup must be able to handle the case of partially setup
        resources, in case a failure during `resource_setup` should happen.
        """
        pass

    def setUp(self):
        super(BaseTestCase, self).setUp()
        if not self.setUpClassCalled:
            # FIX: the original message concatenated "...the super's" and
            # "setUpClass..." with no separating space and read
            # "does not calls"
            raise RuntimeError("setUpClass does not call the super's "
                               "setUpClass in the "
                               + self.__class__.__name__)
        at_exit_set.add(self.__class__)
        # Optional per-test timeout, taken from the environment
        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        # Optional stdout/stderr/log capture, controlled by the environment
        if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
                os.environ.get('OS_STDOUT_CAPTURE') == '1'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
                os.environ.get('OS_STDERR_CAPTURE') == '1'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
        if (os.environ.get('OS_LOG_CAPTURE') != 'False' and
                os.environ.get('OS_LOG_CAPTURE') != '0'):
            self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
                                                   format=self.log_format,
                                                   level=None))

    @classmethod
    def get_client_manager(cls):
        """
        Returns an OpenStack client manager
        """
        force_tenant_isolation = getattr(cls, 'force_tenant_isolation', None)
        # Re-create the credentials provider when missing or owned by a
        # different test class
        if (not hasattr(cls, 'isolated_creds') or
                not cls.isolated_creds.name == cls.__name__):
            cls.isolated_creds = credentials.get_isolated_credentials(
                name=cls.__name__, network_resources=cls.network_resources,
                force_tenant_isolation=force_tenant_isolation,
            )
        creds = cls.isolated_creds.get_primary_creds()
        os = clients.Manager(credentials=creds, service=cls._service)
        return os

    @classmethod
    def clear_isolated_creds(cls):
        """
        Clears isolated creds if set
        """
        if hasattr(cls, 'isolated_creds'):
            cls.isolated_creds.clear_isolated_creds()

    @classmethod
    def _get_identity_admin_client(cls):
        """
        Returns an instance of the Identity Admin API client
        """
        os = clients.AdminManager(service=cls._service)
        admin_client = os.identity_client
        return admin_client

    @classmethod
    def set_network_resources(cls, network=False, router=False, subnet=False,
                              dhcp=False):
        """Specify which network resources should be created

        @param network
        @param router
        @param subnet
        @param dhcp
        """
        # network resources should be set only once from callers
        # in order to ensure that even if it's called multiple times in
        # a chain of overloaded methods, the attribute is set only
        # in the leaf class
        if not cls.network_resources:
            cls.network_resources = {
                'network': network,
                'router': router,
                'subnet': subnet,
                'dhcp': dhcp}

    def assertEmpty(self, list, msg=None):
        """Assert that the given sequence has no elements."""
        self.assertTrue(len(list) == 0, msg)

    def assertNotEmpty(self, list, msg=None):
        """Assert that the given sequence has at least one element."""
        self.assertTrue(len(list) > 0, msg)
class NegativeAutoTest(BaseTestCase):
    """Base class for auto-generated negative API tests.

    Subclasses carry a ``_schema`` description; ``load_tests`` expands it
    into testscenarios entries and ``execute`` issues the negative call and
    verifies a client-error response.
    """
    # Shared registry of resource name -> valid resource id, populated via
    # set_resource() by subclass setup code.
    _resources = {}
    @classmethod
    def setUpClass(cls):
        super(NegativeAutoTest, cls).setUpClass()
        # Primary-credential client used for the negative requests
        os = cls.get_client_manager()
        cls.client = os.negative_client
        # Admin client, used when a schema sets "admin_client"
        os_admin = clients.AdminManager(service=cls._service)
        cls.admin_client = os_admin.negative_client
    @staticmethod
    def load_tests(*args):
        """
        Wrapper for testscenarios to set the mandatory scenarios variable
        only in case a real test loader is in place. Will be automatically
        called in case the variable "load_tests" is set.
        """
        # unittest passes (loader, standard_tests, pattern); other callers
        # pass (standard_tests, module, loader) -- detect via suiteClass.
        if getattr(args[0], 'suiteClass', None) is not None:
            loader, standard_tests, pattern = args
        else:
            standard_tests, module, loader = args
        # Attach generated scenarios to every test that declares a _schema.
        for test in testtools.iterate_tests(standard_tests):
            schema = getattr(test, '_schema', None)
            if schema is not None:
                setattr(test, 'scenarios',
                        NegativeAutoTest.generate_scenario(schema))
        return testscenarios.load_tests_apply_scenarios(*args)
    @staticmethod
    def generate_scenario(description):
        """
        Generates the test scenario list for a given description.
        :param description: A file or dictionary with the following entries:
        name (required) name for the api
        http-method (required) one of HEAD,GET,PUT,POST,PATCH,DELETE
        url (required) the url to be appended to the catalog url with '%s'
        for each resource mentioned
        resources: (optional) A list of resource names such as "server",
        "flavor", etc. with an element for each '%s' in the url. This
        method will call self.get_resource for each element when
        constructing the positive test case template so negative
        subclasses are expected to return valid resource ids when
        appropriate.
        json-schema (optional) A valid json schema that will be used to
        create invalid data for the api calls. For "GET" and "HEAD",
        the data is used to generate query strings appended to the url,
        otherwise for the body of the http call.
        """
        LOG.debug(description)
        # The generator class is configurable via CONF.negative.test_generator.
        generator = importutils.import_class(
            CONF.negative.test_generator)()
        generator.validate_schema(description)
        schema = description.get("json-schema", None)
        resources = description.get("resources", [])
        scenario_list = []
        expected_result = None
        # One scenario per resource, substituting a random (invalid) uuid.
        for resource in resources:
            if isinstance(resource, dict):
                expected_result = resource['expected_result']
                resource = resource['name']
            LOG.debug("Add resource to test %s" % resource)
            scn_name = "inv_res_%s" % (resource)
            scenario_list.append((scn_name, {"resource": (resource,
                                                          str(uuid.uuid4())),
                                             "expected_result": expected_result
                                             }))
        # Further scenarios generated from the json-schema, if present.
        if schema is not None:
            for scenario in generator.generate_scenarios(schema):
                scenario_list.append((scenario['_negtest_name'],
                                      scenario))
        LOG.debug(scenario_list)
        return scenario_list
    def execute(self, description):
        """
        Execute a http call on an api that are expected to
        result in client errors. First it uses invalid resources that are part
        of the url, and then invalid data for queries and http request bodies.
        :param description: A json file or dictionary with the following
        entries:
        name (required) name for the api
        http-method (required) one of HEAD,GET,PUT,POST,PATCH,DELETE
        url (required) the url to be appended to the catalog url with '%s'
        for each resource mentioned
        resources: (optional) A list of resource names such as "server",
        "flavor", etc. with an element for each '%s' in the url. This
        method will call self.get_resource for each element when
        constructing the positive test case template so negative
        subclasses are expected to return valid resource ids when
        appropriate.
        json-schema (optional) A valid json schema that will be used to
        create invalid data for the api calls. For "GET" and "HEAD",
        the data is used to generate query strings appended to the url,
        otherwise for the body of the http call.
        """
        LOG.info("Executing %s" % description["name"])
        LOG.debug(description)
        generator = importutils.import_class(
            CONF.negative.test_generator)()
        schema = description.get("json-schema", None)
        method = description["http-method"]
        url = description["url"]
        expected_result = None
        if "default_result_code" in description:
            expected_result = description["default_result_code"]
        resources = [self.get_resource(r) for
                     r in description.get("resources", [])]
        if hasattr(self, "resource"):
            # Note(mkoderer): The resources list already contains an invalid
            # entry (see get_resource).
            # We just send a valid json-schema with it
            valid_schema = None
            if schema:
                valid_schema = \
                    valid.ValidTestGenerator().generate_valid(schema)
            new_url, body = self._http_arguments(valid_schema, url, method)
        elif hasattr(self, "_negtest_name"):
            # Invalid-payload scenario: the generator builds the payload and
            # may supply the expected error code for it.
            schema_under_test = \
                valid.ValidTestGenerator().generate_valid(schema)
            local_expected_result = \
                generator.generate_payload(self, schema_under_test)
            if local_expected_result is not None:
                expected_result = local_expected_result
            new_url, body = \
                self._http_arguments(schema_under_test, url, method)
        else:
            raise Exception("testscenarios are not active. Please make sure "
                            "that your test runner supports the load_tests "
                            "mechanism")
        if "admin_client" in description and description["admin_client"]:
            client = self.admin_client
        else:
            client = self.client
        resp, resp_body = client.send_request(method, new_url,
                                              resources, body=body)
        self._check_negative_response(expected_result, resp.status, resp_body)
    def _http_arguments(self, json_dict, url, method):
        # For query-style methods the data becomes a query string; otherwise
        # it is serialized as the request body.
        LOG.debug("dict: %s url: %s method: %s" % (json_dict, url, method))
        if not json_dict:
            return url, None
        elif method in ["GET", "HEAD", "PUT", "DELETE"]:
            return "%s?%s" % (url, parse.urlencode(json_dict)), None
        else:
            return url, json.dumps(json_dict)
    def _check_negative_response(self, expected_result, result, body):
        # Any 4xx except 413 counts as the expected client error.
        self.assertTrue(result >= 400 and result < 500 and result != 413,
                        "Expected client error, got %s:%s" %
                        (result, body))
        self.assertTrue(expected_result is None or expected_result == result,
                        "Expected %s, got %s:%s" %
                        (expected_result, result, body))
    @classmethod
    def set_resource(cls, name, resource):
        """
        This function can be used in setUpClass context to register a resource
        for a test.
        :param name: The name of the kind of resource such as "flavor", "role",
            etc.
        :resource: The id of the resource
        """
        cls._resources[name] = resource
    def get_resource(self, name):
        """
        Return a valid uuid for a type of resource. If a real resource is
        needed as part of a url then this method should return one. Otherwise
        it can return None.
        :param name: The name of the kind of resource such as "flavor", "role",
            etc.
        """
        if isinstance(name, dict):
            name = name['name']
        # When this scenario's invalid resource matches, return its bogus id.
        if hasattr(self, "resource") and self.resource[0] == name:
            LOG.debug("Return invalid resource (%s) value: %s" %
                      (self.resource[0], self.resource[1]))
            return self.resource[1]
        if name in self._resources:
            return self._resources[name]
        return None
def SimpleNegativeAutoTest(klass):
    """
    This decorator registers a test function on basis of the class name.
    """
    @attr(type=['negative', 'gate'])
    def generic_test(self):
        # only run when the class actually defines a schema
        if hasattr(self, '_schema'):
            self.execute(self._schema)

    # derive the test name from the class name, stripping markers
    trimmed = klass.__name__.replace('JSON', '').replace('Test', '')
    # NOTE(mkoderer): replaces uppercase chars inside the class name with '_'
    snake = re.sub('(?<!^)(?=[A-Z])', '_', trimmed).lower()
    setattr(klass, 'test_%s' % snake, generic_test)
    return klass
def call_until_true(func, duration, sleep_for):
    """
    Call the given function until it returns True (and return True) or
    until the specified duration (in seconds) elapses (and return
    False).

    :param func: A zero argument callable that returns True on success.
    :param duration: The number of seconds for which to attempt a
        successful call of the function.
    :param sleep_for: The number of seconds to sleep after an unsuccessful
                      invocation of the function.
    """
    deadline = time.time() + duration
    # Poll until success or the deadline passes.
    while time.time() < deadline:
        if func():
            return True
        time.sleep(sleep_for)
    return False
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class ReviewsOperations(object):
"""ReviewsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar content_type: The content type. Constant value: "text/plain".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
self.content_type = "text/plain"
def get_review(
self, team_name, review_id, custom_headers=None, raw=False, **operation_config):
"""Returns review details for the review Id passed.
:param team_name: Your Team Name.
:type team_name: str
:param review_id: Id of the review.
:type review_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Review or ClientRawResponse if raw=true
:rtype: ~azure.cognitiveservices.vision.contentmoderator.models.Review
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.contentmoderator.models.APIErrorException>`
"""
# Construct URL
url = '/contentmoderator/review/v1.0/teams/{teamName}/reviews/{reviewId}'
path_format_arguments = {
'baseUrl': self._serialize.url("self.config.base_url", self.config.base_url, 'str', skip_quote=True),
'teamName': self._serialize.url("team_name", team_name, 'str'),
'reviewId': self._serialize.url("review_id", review_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Review', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_job_details(
self, team_name, job_id, custom_headers=None, raw=False, **operation_config):
"""Get the Job Details for a Job Id.
:param team_name: Your Team Name.
:type team_name: str
:param job_id: Id of the job.
:type job_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Job or ClientRawResponse if raw=true
:rtype: ~azure.cognitiveservices.vision.contentmoderator.models.Job or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`APIErrorException<azure.cognitiveservices.vision.contentmoderator.models.APIErrorException>`
"""
# Construct URL
url = '/contentmoderator/review/v1.0/teams/{teamName}/jobs/{JobId}'
path_format_arguments = {
'baseUrl': self._serialize.url("self.config.base_url", self.config.base_url, 'str', skip_quote=True),
'teamName': self._serialize.url("team_name", team_name, 'str'),
'JobId': self._serialize.url("job_id", job_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.APIErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Job', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_reviews(
        self, url_content_type, team_name, create_review_body, sub_team=None, custom_headers=None, raw=False, **operation_config):
    """Create reviews that show up for Reviewers on your team.

    As Reviewers complete reviewing, results of the Review are POSTed to
    the specified CallBackEndpoint. See the service documentation for the
    review-completion callback schema.

    :param url_content_type: The content type.
    :type url_content_type: str
    :param team_name: Your team name.
    :type team_name: str
    :param create_review_body: Body for create reviews API
    :type create_review_body:
     list[~azure.cognitiveservices.vision.contentmoderator.models.CreateReviewBodyItem]
    :param sub_team: SubTeam of your team, you want to assign the created
     review to.
    :type sub_team: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: list or ClientRawResponse if raw=true
    :rtype: list[str] or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`APIErrorException<azure.cognitiveservices.vision.contentmoderator.models.APIErrorException>`
    """
    # Build the request URL.
    url = self._client.format_url(
        '/contentmoderator/review/v1.0/teams/{teamName}/reviews',
        baseUrl=self._serialize.url("self.config.base_url", self.config.base_url, 'str', skip_quote=True),
        teamName=self._serialize.url("team_name", team_name, 'str'))
    # Optional subTeam query parameter.
    query = {}
    if sub_team is not None:
        query['subTeam'] = self._serialize.query("sub_team", sub_team, 'str')
    # Default content type, then caller headers, then the UrlContentType
    # header last so custom_headers cannot clobber it.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)
    headers['UrlContentType'] = self._serialize.header("url_content_type", url_content_type, 'str')
    # Serialize the body and POST.
    body = self._serialize.body(create_review_body, '[CreateReviewBodyItem]')
    request = self._client.post(url, query)
    response = self._client.send(
        request, headers, body, stream=False, **operation_config)
    if response.status_code not in [200]:
        raise models.APIErrorException(self._deserialize, response)
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('[str]', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def create_job(
        self, team_name, content_type, content_id, workflow_name, job_content_type, content_value, call_back_endpoint=None, custom_headers=None, raw=False, **operation_config):
    """Submit content for evaluation and get back a job Id.

    Once the content is evaluated against the provided Workflow, a review
    is created or skipped based on the workflow expression. Job- and
    review-completion callbacks are POSTed to CallBackEndpoint; see the
    service documentation for the callback schemas.

    :param team_name: Your team name.
    :type team_name: str
    :param content_type: Image, Text or Video. Possible values include:
     'Image', 'Text', 'Video'
    :type content_type: str
    :param content_id: Id/Name to identify the content submitted.
    :type content_id: str
    :param workflow_name: Workflow Name that you want to invoke.
    :type workflow_name: str
    :param job_content_type: The content type. Possible values include:
     'application/json', 'image/jpeg'
    :type job_content_type: str
    :param content_value: Content to evaluate for a job.
    :type content_value: str
    :param call_back_endpoint: Callback endpoint for posting the create
     job result.
    :type call_back_endpoint: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: JobId or ClientRawResponse if raw=true
    :rtype: ~azure.cognitiveservices.vision.contentmoderator.models.JobId
     or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`APIErrorException<azure.cognitiveservices.vision.contentmoderator.models.APIErrorException>`
    """
    # Wrap the raw value in the Content model expected by the service.
    content = models.Content(content_value=content_value)
    # Build the request URL.
    url = self._client.format_url(
        '/contentmoderator/review/v1.0/teams/{teamName}/jobs',
        baseUrl=self._serialize.url("self.config.base_url", self.config.base_url, 'str', skip_quote=True),
        teamName=self._serialize.url("team_name", team_name, 'str'))
    # Required query parameters plus the optional callback endpoint.
    query = {
        'ContentType': self._serialize.query("content_type", content_type, 'str'),
        'ContentId': self._serialize.query("content_id", content_id, 'str'),
        'WorkflowName': self._serialize.query("workflow_name", workflow_name, 'str'),
    }
    if call_back_endpoint is not None:
        query['CallBackEndpoint'] = self._serialize.query("call_back_endpoint", call_back_endpoint, 'str')
    # Default content type, caller overrides, then the job content type
    # wins (set last, matching the generated client's behaviour).
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)
    headers['Content-Type'] = self._serialize.header("job_content_type", job_content_type, 'str')
    # Serialize the body and POST.
    body = self._serialize.body(content, 'Content')
    request = self._client.post(url, query)
    response = self._client.send(
        request, headers, body, stream=False, **operation_config)
    if response.status_code not in [200]:
        raise models.APIErrorException(self._deserialize, response)
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('JobId', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def add_video_frame(
        self, team_name, review_id, timescale=None, custom_headers=None, raw=False, **operation_config):
    """Add a video frame to an existing review.

    Review-completion results are POSTed to the review's
    CallBackEndpoint; see the service documentation for the callback
    schema.

    :param team_name: Your team name.
    :type team_name: str
    :param review_id: Id of the review.
    :type review_id: str
    :param timescale: Timescale of the video you are adding frames to.
    :type timescale: int
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`APIErrorException<azure.cognitiveservices.vision.contentmoderator.models.APIErrorException>`
    """
    # Build the request URL.
    url = self._client.format_url(
        '/contentmoderator/review/v1.0/teams/{teamName}/reviews/{reviewId}/frames',
        baseUrl=self._serialize.url("self.config.base_url", self.config.base_url, 'str', skip_quote=True),
        teamName=self._serialize.url("team_name", team_name, 'str'),
        reviewId=self._serialize.url("review_id", review_id, 'str'))
    # Optional timescale query parameter.
    query = {}
    if timescale is not None:
        query['timescale'] = self._serialize.query("timescale", timescale, 'int')
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)
    # POST with no body; only a 200 is accepted.
    request = self._client.post(url, query)
    response = self._client.send(request, headers, stream=False, **operation_config)
    if response.status_code not in [200]:
        raise models.APIErrorException(self._deserialize, response)
    if raw:
        return ClientRawResponse(None, response)
def get_video_frames(
        self, team_name, review_id, start_seed=None, no_of_records=None, filter=None, custom_headers=None, raw=False, **operation_config):
    """Retrieve the frames of a video review, optionally filtered.

    Review-completion results are POSTed to the review's
    CallBackEndpoint; see the service documentation for the callback
    schema.

    :param team_name: Your team name.
    :type team_name: str
    :param review_id: Id of the review.
    :type review_id: str
    :param start_seed: Time stamp of the frame from where you want to
     start fetching the frames.
    :type start_seed: int
    :param no_of_records: Number of frames to fetch.
    :type no_of_records: int
    :param filter: Get frames filtered by tags.
    :type filter: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: Frames or ClientRawResponse if raw=true
    :rtype: ~azure.cognitiveservices.vision.contentmoderator.models.Frames
     or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`APIErrorException<azure.cognitiveservices.vision.contentmoderator.models.APIErrorException>`
    """
    # Build the request URL.
    url = self._client.format_url(
        '/contentmoderator/review/v1.0/teams/{teamName}/reviews/{reviewId}/frames',
        baseUrl=self._serialize.url("self.config.base_url", self.config.base_url, 'str', skip_quote=True),
        teamName=self._serialize.url("team_name", team_name, 'str'),
        reviewId=self._serialize.url("review_id", review_id, 'str'))
    # All three query parameters are optional.
    query = {}
    if start_seed is not None:
        query['startSeed'] = self._serialize.query("start_seed", start_seed, 'int')
    if no_of_records is not None:
        query['noOfRecords'] = self._serialize.query("no_of_records", no_of_records, 'int')
    if filter is not None:
        query['filter'] = self._serialize.query("filter", filter, 'str')
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)
    # GET and deserialize the Frames payload.
    request = self._client.get(url, query)
    response = self._client.send(request, headers, stream=False, **operation_config)
    if response.status_code not in [200]:
        raise models.APIErrorException(self._deserialize, response)
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('Frames', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def publish_video_review(
        self, team_name, review_id, custom_headers=None, raw=False, **operation_config):
    """Publish video review to make it available for review.

    :param team_name: Your team name.
    :type team_name: str
    :param review_id: Id of the review.
    :type review_id: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`APIErrorException<azure.cognitiveservices.vision.contentmoderator.models.APIErrorException>`
    """
    # Build the request URL.
    url = self._client.format_url(
        '/contentmoderator/review/v1.0/teams/{teamName}/reviews/{reviewId}/publish',
        baseUrl=self._serialize.url("self.config.base_url", self.config.base_url, 'str', skip_quote=True),
        teamName=self._serialize.url("team_name", team_name, 'str'),
        reviewId=self._serialize.url("review_id", review_id, 'str'))
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)
    # POST with no body; the service answers 204 No Content on success.
    request = self._client.post(url, {})
    response = self._client.send(request, headers, stream=False, **operation_config)
    if response.status_code not in [204]:
        raise models.APIErrorException(self._deserialize, response)
    if raw:
        return ClientRawResponse(None, response)
def add_video_transcript_moderation_result(
        self, content_type, team_name, review_id, transcript_moderation_body, custom_headers=None, raw=False, **operation_config):
    """Attach a transcript screen-text result file to a video review.

    The transcript screen text result file is produced by the Screen Text
    API: a transcript file must first be screened for profanity with that
    API to generate it.

    :param content_type: The content type.
    :type content_type: str
    :param team_name: Your team name.
    :type team_name: str
    :param review_id: Id of the review.
    :type review_id: str
    :param transcript_moderation_body: Body for add video transcript
     moderation result API
    :type transcript_moderation_body:
     list[~azure.cognitiveservices.vision.contentmoderator.models.TranscriptModerationBodyItem]
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`APIErrorException<azure.cognitiveservices.vision.contentmoderator.models.APIErrorException>`
    """
    # Build the request URL.
    url = self._client.format_url(
        '/contentmoderator/review/v1.0/teams/{teamName}/reviews/{reviewId}/transcriptmoderationresult',
        baseUrl=self._serialize.url("self.config.base_url", self.config.base_url, 'str', skip_quote=True),
        teamName=self._serialize.url("team_name", team_name, 'str'),
        reviewId=self._serialize.url("review_id", review_id, 'str'))
    # Default content type, caller overrides, then the explicit
    # content_type argument wins (set last).
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)
    headers['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    # Serialize the body and PUT; success is 204 No Content.
    body = self._serialize.body(transcript_moderation_body, '[TranscriptModerationBodyItem]')
    request = self._client.put(url, {})
    response = self._client.send(
        request, headers, body, stream=False, **operation_config)
    if response.status_code not in [204]:
        raise models.APIErrorException(self._deserialize, response)
    if raw:
        return ClientRawResponse(None, response)
def add_video_transcript(
        self, team_name, review_id, vt_tfile, custom_headers=None, raw=False, callback=None, **operation_config):
    """Upload a transcript file (WebVTT) for a video review.

    The transcript is the text version of all the words spoken in the
    video and must be valid WebVTT.

    :param team_name: Your team name.
    :type team_name: str
    :param review_id: Id of the review.
    :type review_id: str
    :param vt_tfile: Transcript file of the video.
    :type vt_tfile: Generator
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param callback: When specified, will be called with each chunk of
     data that is streamed. The callback should take two arguments, the
     bytes of the current chunk of data and the response object. If the
     data is uploading, response will be None.
    :type callback: Callable[Bytes, response=None]
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`APIErrorException<azure.cognitiveservices.vision.contentmoderator.models.APIErrorException>`
    """
    # Build the request URL.
    url = self._client.format_url(
        '/contentmoderator/review/v1.0/teams/{teamName}/reviews/{reviewId}/transcript',
        baseUrl=self._serialize.url("self.config.base_url", self.config.base_url, 'str', skip_quote=True),
        teamName=self._serialize.url("team_name", team_name, 'str'),
        reviewId=self._serialize.url("review_id", review_id, 'str'))
    # Plain-text default, caller overrides, then the operation-level
    # content type attribute wins (set last).
    headers = {'Content-Type': 'text/plain'}
    if custom_headers:
        headers.update(custom_headers)
    headers['Content-Type'] = self._serialize.header("self.content_type", self.content_type, 'str')
    # Stream the file as the request body and PUT; success is 204.
    body = self._client.stream_upload(vt_tfile, callback)
    request = self._client.put(url, {})
    response = self._client.send(
        request, headers, body, stream=False, **operation_config)
    if response.status_code not in [204]:
        raise models.APIErrorException(self._deserialize, response)
    if raw:
        return ClientRawResponse(None, response)
def create_video_reviews(
        self, content_type, team_name, create_video_reviews_body, sub_team=None, custom_headers=None, raw=False, **operation_config):
    """Create video reviews that show up for Reviewers on your team.

    As Reviewers complete reviewing, results of the Review are POSTed to
    the specified CallBackEndpoint. See the service documentation for the
    review-completion callback schema.

    :param content_type: The content type.
    :type content_type: str
    :param team_name: Your team name.
    :type team_name: str
    :param create_video_reviews_body: Body for create reviews API
    :type create_video_reviews_body:
     list[~azure.cognitiveservices.vision.contentmoderator.models.CreateVideoReviewsBodyItem]
    :param sub_team: SubTeam of your team, you want to assign the created
     review to.
    :type sub_team: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: list or ClientRawResponse if raw=true
    :rtype: list[str] or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`APIErrorException<azure.cognitiveservices.vision.contentmoderator.models.APIErrorException>`
    """
    # Build the request URL.
    url = self._client.format_url(
        '/contentmoderator/review/v1.0/teams/{teamName}/reviews',
        baseUrl=self._serialize.url("self.config.base_url", self.config.base_url, 'str', skip_quote=True),
        teamName=self._serialize.url("team_name", team_name, 'str'))
    # Optional subTeam query parameter.
    query = {}
    if sub_team is not None:
        query['subTeam'] = self._serialize.query("sub_team", sub_team, 'str')
    # Default content type, caller overrides, then the explicit
    # content_type argument wins (set last).
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)
    headers['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    # Serialize the body and POST.
    body = self._serialize.body(create_video_reviews_body, '[CreateVideoReviewsBodyItem]')
    request = self._client.post(url, query)
    response = self._client.send(
        request, headers, body, stream=False, **operation_config)
    if response.status_code not in [200]:
        raise models.APIErrorException(self._deserialize, response)
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('[str]', response)
    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def add_video_frame_url(
        self, content_type, team_name, review_id, video_frame_body, timescale=None, custom_headers=None, raw=False, **operation_config):
    """Add frames (referenced by URL) to a video review.

    Timescale is a factor used to convert the timestamp on a frame into
    milliseconds. It is provided by the Content Moderator video media
    processor on the Azure Media Services platform and is expressed in
    Ticks/Second in the Video Moderation output.

    :param content_type: The content type.
    :type content_type: str
    :param team_name: Your team name.
    :type team_name: str
    :param review_id: Id of the review.
    :type review_id: str
    :param video_frame_body: Body for add video frames API
    :type video_frame_body:
     list[~azure.cognitiveservices.vision.contentmoderator.models.VideoFrameBodyItem]
    :param timescale: Timescale of the video.
    :type timescale: int
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`APIErrorException<azure.cognitiveservices.vision.contentmoderator.models.APIErrorException>`
    """
    # Build the request URL.
    url = self._client.format_url(
        '/contentmoderator/review/v1.0/teams/{teamName}/reviews/{reviewId}/frames',
        baseUrl=self._serialize.url("self.config.base_url", self.config.base_url, 'str', skip_quote=True),
        teamName=self._serialize.url("team_name", team_name, 'str'),
        reviewId=self._serialize.url("review_id", review_id, 'str'))
    # Optional timescale query parameter.
    query = {}
    if timescale is not None:
        query['timescale'] = self._serialize.query("timescale", timescale, 'int')
    # Default content type, caller overrides, then the explicit
    # content_type argument wins (set last).
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if custom_headers:
        headers.update(custom_headers)
    headers['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    # Serialize the body and POST; success is 204 No Content.
    body = self._serialize.body(video_frame_body, '[VideoFrameBodyItem]')
    request = self._client.post(url, query)
    response = self._client.send(
        request, headers, body, stream=False, **operation_config)
    if response.status_code not in [204]:
        raise models.APIErrorException(self._deserialize, response)
    if raw:
        return ClientRawResponse(None, response)
def add_video_frame_stream(
        self, content_type, team_name, review_id, frame_image_zip, frame_metadata, timescale=None, custom_headers=None, raw=False, **operation_config):
    """Add frames to a video review by uploading a zip of frame images.

    Timescale is a factor used to convert the timestamp on a frame into
    milliseconds. It is provided by the Content Moderator video media
    processor on the Azure Media Services platform and is expressed in
    Ticks/Second in the Video Moderation output.

    :param content_type: The content type.
    :type content_type: str
    :param team_name: Your team name.
    :type team_name: str
    :param review_id: Id of the review.
    :type review_id: str
    :param frame_image_zip: Zip file containing frame images.
    :type frame_image_zip: Generator
    :param frame_metadata: Metadata of the frame.
    :type frame_metadata: str
    :param timescale: Timescale of the video .
    :type timescale: int
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`APIErrorException<azure.cognitiveservices.vision.contentmoderator.models.APIErrorException>`
    """
    # Build the request URL.
    url = self._client.format_url(
        '/contentmoderator/review/v1.0/teams/{teamName}/reviews/{reviewId}/frames',
        baseUrl=self._serialize.url("self.config.base_url", self.config.base_url, 'str', skip_quote=True),
        teamName=self._serialize.url("team_name", team_name, 'str'),
        reviewId=self._serialize.url("review_id", review_id, 'str'))
    # Optional timescale query parameter.
    query = {}
    if timescale is not None:
        query['timescale'] = self._serialize.query("timescale", timescale, 'int')
    # Multipart default, caller overrides, then the explicit content_type
    # argument wins (set last).
    headers = {'Content-Type': 'multipart/form-data'}
    if custom_headers:
        headers.update(custom_headers)
    headers['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    # Send the zip and its metadata as multipart form data.
    form_data = {
        'frameImageZip': frame_image_zip,
        'frameMetadata': frame_metadata,
    }
    request = self._client.post(url, query)
    response = self._client.send_formdata(
        request, headers, form_data, stream=False, **operation_config)
    if response.status_code not in [204]:
        raise models.APIErrorException(self._deserialize, response)
    if raw:
        return ClientRawResponse(None, response)
| |
#!/usr/bin/env python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Redirect related methods.
"""
__authors__ = [
'"Daniel Hans" <daniel.m.hans@gmail.com>',
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from google.appengine.ext import db
# TODO: Maybe redirects should become a class?
# Then the program specific redirects may inherit from this class.
# It is easier to maintain imports, as a view does not need to
# import both redirects and gsoc/redirects modules.
def getApplyRedirect(entity, params):
  """Builds the apply URL for the given entity and view params.
  """
  scope = entity.key().id_or_name()
  return '/%s/apply/%s' % (params['url_name'], scope)
def getInviteRedirect(entity, params):
  """Builds the invitation URL for the given entity and view params.
  """
  scope = entity.key().id_or_name()
  return '/%s/invite/%s' % (params['url_name'], scope)
def getCreateRedirect(entity, params):
  """Builds the create URL for the given entity and view params.
  """
  scope = entity.key().id_or_name()
  return '/%s/create/%s' % (params['url_name'], scope)
def getEditRedirect(entity, params):
  """Builds the edit URL for the given entity and view params.
  """
  scope = entity.key().id_or_name()
  return '/%s/edit/%s' % (params['url_name'], scope)
def getPublicRedirect(entity, params):
  """Builds the public (show) URL for the given entity and view params.
  """
  scope = entity.key().id_or_name()
  return '/%s/show/%s' % (params['url_name'], scope)
def getAdminRedirect(entity, params):
  """Builds the admin URL for the given entity and view params.
  """
  scope = entity.key().id_or_name()
  return '/%s/admin/%s' % (params['url_name'], scope)
def getListRedirect(entity, params):
  """Builds the list URL for the given entity and view params.
  """
  scope = entity.key().id_or_name()
  return '/%s/list/%s' % (params['url_name'], scope)
def getPublicListRedirect(entity, params):
  """Builds the public list URL for the given entity and view params.
  """
  scope = entity.key().id_or_name()
  return '/%s/list_public/%s' % (params['url_name'], scope)
def getExportRedirect(entity, params):
  """Builds the export URL for the given entity and view params.
  """
  scope = entity.key().id_or_name()
  return '/%s/export/%s' % (params['url_name'], scope)
def getHomeRedirect(entity, params):
  """Builds the home URL for the given entity and view params.
  """
  scope = entity.key().id_or_name()
  return '/%s/home/%s' % (params['url_name'], scope)
def getReviewRedirect(entity, params):
  """Builds the review URL for the given entity and view params.
  """
  scope = entity.key().id_or_name()
  return '/%s/review/%s' % (params['url_name'], scope)
def getReviewOverviewRedirect(entity, params):
  """Builds the Review Overview URL for Org Applications.

  Args:
    entity: OrgAppSurvey entity
    params: Org App View params
  """
  scope = entity.key().id_or_name()
  return '/%s/review_overview/%s' % (params['url_name'], scope)
def getCreateRequestRedirect(entity, params):
  """Builds the create-request URL for the given entity and view params.
  """
  scope = entity.key().id_or_name()
  return '/request/create/%s/%s/%s' % (
      params['group_scope'], params['url_name'], scope)
def getRequestRedirectForRole(entity, role_name):
  """Builds the URL to create a request for the given role.
  """
  scope = entity.key().id_or_name()
  return '/%s/request/%s' % (role_name, scope)
def getInviteRedirectForRole(entity, role_name):
  """Returns the redirect to create an invite for a specific role.
  """
  return '/{0}/invite/{1}'.format(role_name, entity.key().id_or_name())
def getListProposalsRedirect(entity, params):
  """Returns the redirect for the List page for the given
  Org entity and Org View params.
  """
  return '/%(url_name)s/list_proposals/%(key)s' % {
      'url_name': params['url_name'],
      'key': entity.key().id_or_name(),
  }
def getAcceptedOrgsRedirect(entity, params):
  """Returns the redirect for the List of accepted orgs.
  """
  prefix = params['url_name']
  return '/%s/accepted_orgs/%s' % (prefix, entity.key().id_or_name())
def getListProjectsRedirect(entity, params):
  """Returns the redirect for the List Projects page for the given entity.
  """
  return '/{0}/list_projects/{1}'.format(
      params['url_name'], entity.key().id_or_name())
def getListRequestsRedirect(entity, params):
  """Returns the redirect for the List Requests paged for the given
  Group entity and Group View params.
  """
  url_name = params['url_name']
  entity_key = entity.key().id_or_name()
  return '/%s/list_requests/%s' % (url_name, entity_key)
def getListSelfRedirect(entity, params):
  """Returns the redirect for list_self access type.
  """
  return '/%s/list_self/%s' % (
      params['url_name'], entity.key().id_or_name())
def getListRolesRedirect(entity, params):
  """Returns the redirect for the List Roles paged for the given
  Group entity and Group View params.
  """
  return '/%(url_name)s/list_roles/%(key)s' % {
      'url_name': params['url_name'],
      'key': entity.key().id_or_name(),
  }
def getListParticipantsRedirect(entity, params):
  """Returns the redirect for the List of all participants in a program.
  """
  return '/{0}/list_participants/{1}'.format(
      params['url_name'], entity.key().id_or_name())
def getWithdrawRedirect(entity, params):
  """Returns the redirect for withdraw_project access type.
  """
  prefix = params['url_name']
  return '/%s/withdraw/%s' % (prefix, entity.key().id_or_name())
def getUserRolesRedirect(_, __):
  """Returns the redirect to the users Roles page.

  Both arguments are ignored; they exist only so this function matches
  the (entity, params) redirect signature used elsewhere in this module.
  """
  return '/user/roles'
def getProcessRequestRedirect(entity, _):
  """Returns the redirect for processing the specified request entity.

  Args:
    entity: a request entity; its `role` attribute selects the role view
      whose url_name prefixes the redirect.
    _: unused params argument, kept for redirect-signature uniformity.
  """
  # Imported inside the function, presumably to avoid a circular import
  # at module load time -- TODO confirm.
  from soc.views.models.role import ROLE_VIEWS
  role_view = ROLE_VIEWS[entity.role]
  result = '/%s/process_request/%s' % (
      role_view.getParams()['url_name'], entity.key().id_or_name())
  return result
def getManageRedirect(entity, params):
  """Returns the redirect for managing the given entity.
  """
  return '/%s/manage/%s' % (
      params['url_name'], entity.key().id_or_name())
def getManageOverviewRedirect(entity, params):
  """Returns the redirect for the manage overview view of the given entity.
  """
  return '/{0}/manage_overview/{1}'.format(
      params['url_name'], entity.key().id_or_name())
def getOverviewRedirect(entity, params):
  """Returns the redirect for the overview view of the given entity.

  Args:
    entity: datastore entity whose key identifies the target
    params: view params dict containing a 'url_name' entry
  """
  # Docstring previously said "manage overview" (copied from
  # getManageOverviewRedirect); the URL built here is the /overview/ view.
  result = '/%s/overview/%s' % (
      params['url_name'], entity.key().id_or_name())
  return result
def getSelectRedirect(params):
  """Returns the pick redirect for the specified entity.
  """
  base = '/%(url_name)s/pick' % params
  args = params.get('args')
  # Append the pre-formatted query string only when one was supplied.
  if args:
    return '%s?%s' % (base, args)
  return base
def getInviteAcceptedRedirect(entity, _):
  """Returns the redirect for accepting an invite.

  Args:
    entity: an invite entity; its `role` attribute selects the role view
      whose url_name prefixes the redirect.
    _: unused params argument, kept for redirect-signature uniformity.
  """
  # Imported inside the function, presumably to avoid a circular import
  # at module load time -- TODO confirm.
  from soc.views.models.role import ROLE_VIEWS
  role_params = ROLE_VIEWS[entity.role].getParams()
  return '/%s/accept_invite/%s' % (
      role_params['url_name'], entity.key().id_or_name())
def getInviteProcessRedirect(entity, _):
  """Returns the redirect for processing an invite.

  The second argument is ignored; it exists only so this function matches
  the (entity, params) redirect signature used elsewhere in this module.
  """
  return '/request/process_invite/%s' % entity.key().id_or_name()
def getApplicantRedirect(entity, params):
  """Returns the redirect for processing accepted Applications.
  """
  # The path names the program; the application itself is carried in the
  # ?id= query parameter.
  program_key = params['program'].key().id_or_name()
  entity_key = entity.key().id_or_name()
  return '/%s/applicant/%s?id=%s' % (
      params['url_name'], program_key, entity_key)
def getStudentPrivateRedirect(entity, params):
  """Returns private redirect for the specified entity.
  """
  return '/{0}/private/{1}'.format(
      params['url_name'], entity.key().id_or_name())
def getStudentEditRedirect(entity, params):
  """Returns the redirect for Students to edit their Projects.
  """
  return '/%(url_name)s/st_edit/%(key)s' % {
      'url_name': params['url_name'],
      'key': entity.key().id_or_name(),
  }
def getProposalCommentRedirect(entity, params):
  """Returns comment redirect for the specified student proposal.
  """
  prefix = params['url_name']
  return '/%s/comment/%s' % (prefix, entity.key().id_or_name())
def getStudentProposalRedirect(entity, params):
  """Returns the student proposal redirect for the given org and student.
  """
  # Here `entity` is the organization (its link_id goes in the query
  # string) and the student is carried in params['student_key'].
  return '/%s/apply/%s?organization=%s' % (
      params['url_name'], params['student_key'], entity.link_id)
def getShowDuplicatesRedirect(entity, params):
  """Returns the show duplicates redirect for the specified entity.
  """
  # NOTE: this redirect uses key().name(), not key().id_or_name(), unlike
  # most other redirects in this module.
  return '/{0}/show_duplicates/{1}'.format(
      params['url_name'], entity.key().name())
def getSlotsRedirect(entity, params):
  """Returns the slots redirect for the specified entity.
  """
  return '/%s/slots/%s' % (
      params['url_name'], entity.key().id_or_name())
def getAssignSlotsRedirect(entity, params):
  """Returns the assign slots redirect for the specified entity.
  """
  url_name = params['url_name']
  entity_key = entity.key().id_or_name()
  return '/%s/assign_slots/%s' % (url_name, entity_key)
def getCreateDocumentRedirect(entity, prefix):
  """Returns the redirect for new documents.
  """
  return '/document/create/{0}/{1}'.format(prefix, entity.key().id_or_name())
def getListDocumentsRedirect(entity, prefix):
  """Returns the redirect for listing documents.
  """
  return '/document/list/%s/%s' % (prefix, entity.key().id_or_name())
def getCreateSurveyRedirect(entity, prefix, url_name):
  """Returns the redirect for new surveys.
  """
  return '/{0}/create/{1}/{2}'.format(
      url_name, prefix, entity.key().id_or_name())
def getListSurveysRedirect(entity, prefix, url_name):
  """Returns the redirect for listing surveys.
  """
  return '/%s/list/%s/%s' % (url_name, prefix, entity.key().id_or_name())
def getTakeSurveyRedirect(entity, info):
  """Returns the redirect for taking a Survey.

  Args:
    entity: a Survey entity
    info: a dictionary containing a 'url_name' entry
  """
  # The previous docstring claimed info holds 'survey'/'params' entries
  # (copied from getTakeProjectSurveyRedirect), but the code only reads
  # info['url_name']. The local aliases were redundant and are dropped.
  return '/%s/take/%s' % (info['url_name'], entity.key().id_or_name())
def getReviewOrgAppSurvey(survey_record, info):
  """Returns redirect to review an OrgAppSurvey record.

  Args:
    survey_record: OrgAppRecord entity
    info: a dictionary with survey and url_name entry
  """
  # Docstring previously said "retake" (copied from
  # getRetakeOrgAppSurveyRedirect); this builds the /review/ URL.
  return '/%s/review/%s?id=%s' % (
      info['url_name'], info['survey'].key().id_or_name(),
      survey_record.key().id_or_name())
def getRetakeOrgAppSurveyRedirect(survey_record, info):
  """Returns redirect to retake a OrgAppSurvey.

  Args:
    survey_record: OrgAppRecord entity
    info: a dictionary with survey and url_name entry
  """
  survey_key = info['survey'].key().id_or_name()
  record_key = survey_record.key().id_or_name()
  return '/%s/take/%s?id=%s' % (info['url_name'], survey_key, record_key)
def getTakeProjectSurveyRedirect(entity, info):
  """Returns the redirect for taking a Survey for the given Student Project.

  Args:
    entity: a StudentProject entity
    info: a dictionary contain a survey and params entry
  """
  # The survey key goes in the path; the project key rides along as the
  # ?project= query parameter.
  return '/{0}/take/{1}?project={2}'.format(
      info['params']['url_name'],
      info['survey'].key().id_or_name(),
      entity.key().id_or_name())
def getViewSurveyRecordRedirect(entity, params):
  """Returns the redirect for view a Survey Record
  for the given Survey Record.

  Args:
    entity: a Survey Record entity
    params: params for a Survey view
  """
  survey_key = entity.survey.key().id_or_name()
  record_key = entity.key().id_or_name()
  return '/%s/record/%s?id=%s' % (params['url_name'], survey_key, record_key)
def getEditGradingRecordRedirect(entity, params):
  """Returns the redirect for editing a given GradingRecord.
  """
  group_key = entity.grading_survey_group.key().id_or_name()
  record_key = entity.key().id_or_name()
  return '/%s/edit_record/%s?id=%s' % (
      params['url_name'], group_key, record_key)
def getViewRecordsRedirect(entity, params):
  """Returns the redirect to view all GradingRecords for one
  GradingSurveyGroup.
  """
  # NOTE(review): the URL deliberately(?) ends with a bare '?' -- kept
  # as-is to preserve existing behavior; confirm whether it is needed.
  return '/{0}/records/{1}?'.format(
      params['url_name'], entity.key().id_or_name())
def getToSRedirect(presence):
  """Returns link to 'show' the ToS Document if it exists, None otherwise.

  Args:
    presence: Presence entity that may or may not have a tos property
  """
  # No presence at all: nothing to link to.
  if not presence:
    return None
  try:
    # Dereferencing the tos property may hit the datastore and fail;
    # `db` is presumably the App Engine datastore module imported at the
    # top of the file -- TODO confirm.
    tos_doc = presence.tos
  except db.Error:
    return None
  # Property exists but is unset.
  if not tos_doc:
    return None
  # Reuse the generic public redirect under the 'document' view prefix.
  return getPublicRedirect(tos_doc, {'url_name': 'document'})
def getSubscribeRedirect(entity, params):
  """Redirects to subscription XML doc for an entity.
  """
  # NOTE: this redirect uses key().name(), not key().id_or_name(), unlike
  # most other redirects in this module.
  return '/%s/subscribe/%s' % (params['url_name'], entity.key().name())
def getManageStatisticsRedirect(entity, params):
  """Returns redirect for managing statistic view.
  """
  return '/{0}/manage_stats/{1}'.format(
      params['url_name'], entity.key().id_or_name())
def getCreateProfileRedirect(params):
  """Returns the create profile redirect for creating link id for the user.
  """
  return '/%(url_name)s/create_profile' % params
def getEditProfileRedirect(params):
  """Returns the edit profile redirect for editing the profile details.
  """
  return '/%(url_name)s/edit_profile' % params
def getRolesRedirect(params):
  """Returns the roles redirect for the view named in params.

  Args:
    params: view params dict containing a 'url_name' entry
  """
  # Docstring previously said "edit profile redirect" (copied from
  # getEditProfileRedirect); the URL built here is the /roles view.
  result = '/%s/roles' % (
      params['url_name'])
  return result
def getRequestsRedirect(params):
  """Returns the requests redirect for the view named in params.

  Args:
    params: view params dict containing a 'url_name' entry
  """
  # Docstring previously said "edit profile redirect" (copied from
  # getEditProfileRedirect); the URL built here is the /requests view.
  result = '/%s/requests' % (
      params['url_name'])
  return result
def getStudentApplyRedirect(entity, params):
  """Returns the student application for redirect for students to create
  their new profiles.
  """
  return '/{0}/apply/{1}'.format(
      params['url_name'], entity.key().id_or_name())
def getDownloadBlobRedirectWithGet(entity, params, **kwargs):
  """Returns download blob redirect for the given blob entity.

  This redirect is slightly different than other redirects because we
  construct the URL with the GET arguments here. This is done instead
  of the standard URL scheme because the Blob keys generated by Appengine
  can have any characters which may not be able to be defined by a
  standard Django regex.

  Args:
    entity: the BlobInfo entity which contains the blob key
    params: dictionary containing url_name prefix
    kwargs: a list of key-word arguments all of which are converted to
      get query parameters
  """
  url = '/%s/download_blob' % params['url_name']
  # An optional scope_path extends the path rather than becoming a
  # query parameter; remove it so the loop below skips it.
  scope_path = kwargs.pop('scope_path', None)
  if scope_path:
    url = '%s/%s' % (url, scope_path)
  # NOTE(review): neither the blob key nor the kwargs values are
  # URL-escaped here; presumably callers pass URL-safe values -- confirm.
  pieces = ['%s?key=%s' % (url, entity.key())]
  for arg_name in kwargs:
    pieces.append('='.join([arg_name, kwargs[arg_name]]))
  return '&'.join(pieces)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.