gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
"""
Variational Auto-Encoder.
Copyright (C) 2017, Lucas Ondel
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import abc
import pickle
import numpy as np
import theano
import theano.tensor as T
from .model import PersistentModel
from .mlp_utils import GaussianNeuralNetwork
from .mlp_utils import NeuralNetwork
class SVAE(object):
    """Structured Variational Auto-Encoder.

    An encoder network outputs the mean/variance of a diagonal Gaussian
    posterior over latent variables; an optional decoder reconstructs the
    input from a posterior sample, and an external exponential-family
    ``prior`` (an object exposing ``components``, ``get_resps`` and
    ``sym_classify``) supplies the per-frame prior used in the KL term.
    """

    def __init__(self, encoder_structure, decoder_structure,
                 prior, var_reg=-1, no_llh=False, kl_weights=1.,
                 classifier_structure=None):
        # Encoder maps inputs to posterior mean/variance and a sample.
        self.encoder = GaussianNeuralNetwork(encoder_structure, [])
        self.params = self.encoder.params
        if not no_llh:
            # Decoder reconstructs the input from the posterior sample.
            self.decoder = GaussianNeuralNetwork(decoder_structure, [],
                                                 self.encoder.sample)
            self.params += self.decoder.params
        self.var_reg = var_reg        # decoder-variance penalty (off if <= 0)
        self.no_llh = no_llh          # skip the reconstruction term entirely
        self.kl_weights = kl_weights  # scaling applied to the KL term
        self.prior = prior
        if classifier_structure is not None:
            self.classifier = classifier_structure
            #self.classifier = NeuralNetwork(classifier_structure, [],
            #                                self.encoder.sample)
            #self.params += self.classifier.params
        else:
            self.classifier = None
        self._build()

    def _build(self):
        """Compile the theano functions for gradients and classification."""
        post_mean = self.encoder.mean
        post_var = self.encoder.var
        # KL divergence posterior/prior.
        # Closed form between diagonal Gaussians; summed over latent dims.
        prior_mean = T.matrix(dtype=theano.config.floatX)
        prior_var = T.matrix(dtype=theano.config.floatX)
        kl_div = (.5 * (prior_mean - post_mean)**2) / prior_var
        ratio = post_var / prior_var
        kl_div += .5 * (ratio - 1 - T.log(ratio))
        kl_div = T.sum(kl_div, axis=1)
        # Log-likelihood of the data (up to a constant).
        if not self.no_llh:
            targets = self.encoder.inputs
            mean = self.decoder.outputs
            var = self.decoder.var
            llh = -.5 * T.sum(T.log(var), axis=1)
            llh += -.5 * T.sum(((targets - mean) ** 2) / var, axis=1)
            if self.var_reg > 0:
                # Extra penalty discouraging large decoder variances.
                llh += T.sum(T.log(self.var_reg) - self.var_reg * var, axis=1)
        else:
            llh = 0.
        # Evidence Lower-Bound.
        resps = T.matrix()
        #if self.classifier is not None:
        #classifier_llh = T.sum(resps * T.log(self.classifier.outputs), axis=1)
        #llh += classifier_llh
        #self.classify = theano.function(
        #    inputs=[self.encoder.inputs],
        #    outputs=T.argmax(self.classifier.outputs, axis=1)
        #)
        # Sufficient statistics of the latent sample: [x**2, x], padded
        # with ones for the prior's log-normalizer terms.
        #s_stats = T.concatenate([post_var + post_mean**2, post_mean], axis=1)
        s_stats = T.concatenate([self.encoder.sample**2, self.encoder.sample], axis=1)
        pad_s_stats = T.concatenate([s_stats, T.ones_like(s_stats)], axis=1)
        prediction = self.prior.sym_classify(pad_s_stats)
        classifier_llh = T.sum(resps * T.log(prediction), axis=1)
        if self.classifier is not None:
            llh += classifier_llh
        elbo = T.sum(llh - self.kl_weights * kl_div)
        self._get_gradients = theano.function(
            inputs=[self.encoder.inputs, prior_mean, prior_var, resps],
            outputs=[T.sum(llh), self.kl_weights * T.sum(kl_div)] + \
                [T.grad(elbo, param) for param in self.params],
            on_unused_input='ignore'
        )
        # NOTE(review): this instance attribute shadows the ``classify``
        # method defined below -- callers get this compiled function.
        self.classify = theano.function(
            inputs=[self.encoder.inputs],
            outputs=T.argmax(prediction, axis=1)
        )

    def decode(self, prior, data, state_path=False):
        """Decode ``data`` with ``prior`` using the posterior means."""
        mean, var = self.encoder.forward(data)
        return prior.decode(mean, state_path)

    def classify(self, prior, data):
        # NOTE(review): effectively dead code -- ``_build`` overwrites
        # ``self.classify`` on every instance (see above).
        mean, var = self.encoder.forward(data)
        # Expected value of the sufficient statistics.
        s_stats = np.c_[mean**2 + var, mean,
                        np.ones((len(mean), 2 * mean.shape[1]))]
        # Clustering.
        log_norm, resps, acc_stats = prior.get_resps(s_stats)
        return resps[0].T.argmax(axis=1)

    def get_gradients(self, prior, data, log_resps=None):
        """Return (llh - kl, parameter gradients, accumulated prior stats).

        :param prior: prior model providing responsibilities and components.
        :param data: mini-batch of observations (numpy array).
        :param log_resps: optional fixed log-responsibilities.
        """
        mean, var = self.encoder.forward(data)
        # Expected value of the sufficient statistics.
        s_stats = np.c_[mean**2 + var, mean,
                        np.ones((len(mean), 2 * mean.shape[1]))]
        # Clustering.
        log_norm, resps, acc_stats = prior.get_resps(s_stats, log_resps)
        #log_norm, resps, acc_stats = prior.get_resps(s_stats)
        # Expected value of the prior's components parameters.
        dim_latent = self.encoder.layers[-1].dim_out
        p_np1 = [comp.posterior.grad_log_partition[:dim_latent]
                 for comp in prior.components]
        p_np2 = [comp.posterior.grad_log_partition[dim_latent:2 * dim_latent]
                 for comp in prior.components]
        # Responsibility-weighted natural parameters per frame.
        q_np1 = resps[0].T.dot(p_np1)
        q_np2 = resps[0].T.dot(p_np2)
        # Convert the natural parameters to the standard parameters.
        prior_var = -1 / (2 * q_np1)
        prior_mean = q_np2 * prior_var
        val_and_grads = self._get_gradients(data, prior_mean, prior_var,
                                            resps[0].T)
        #val_and_grads = self._get_gradients(data, prior_mean, prior_var,
        #                                    np.exp(log_resps))
        return val_and_grads[0] - val_and_grads[1], val_and_grads[2:], \
            acc_stats
class MLPClassifier(object):
    """Plain MLP classifier trained from (log) responsibilities."""

    def __init__(self, structure):
        self.nnet = NeuralNetwork(structure, [])
        self.params = self.nnet.params
        self._build()

    def _build(self):
        """Compile the theano functions for training and prediction."""
        targets = T.matrix()
        probs = self.nnet.outputs
        # Cross-entropy objective between targets and predicted posteriors.
        objective = T.sum(targets * T.log(probs))
        gradients = [T.grad(objective, p) for p in self.params]
        self._get_gradients = theano.function(
            inputs=[self.nnet.inputs, targets],
            outputs=[objective] + gradients,
        )
        self.classify = theano.function(
            inputs=[self.nnet.inputs],
            outputs=T.argmax(probs, axis=1)
        )

    def get_gradients(self, data, log_resps):
        """Return (objective value, parameter gradients) for a batch."""
        outputs = self._get_gradients(data, np.exp(log_resps))
        return outputs[0], outputs[1:]
| |
import gevent
import socket
from gevent.queue import Empty
class BaseTransport(object):
    """Base class for all transports. Mostly wraps handler class functions."""

    def __init__(self, handler):
        self.content_type = ("Content-Type", "text/plain; charset=UTF-8")
        self.handler = handler

    def encode(self, data):
        return self.handler.environ['socketio'].encode(data)

    def decode(self, data):
        return self.handler.environ['socketio'].decode(data)

    def write_packed(self, data):
        # Subclasses override this to wrap the payload (JSONP, htmlfile, ...).
        self.write(data)

    def write(self, data):
        """Write a response body, emitting Content-Length exactly once.

        PEP 3333 requires header values to be native strings, so the
        length is stringified before being appended (the original passed
        a raw int, which strict WSGI servers reject).
        """
        if 'Content-Length' not in self.handler.response_headers_list:
            self.handler.response_headers.append(
                ('Content-Length', str(len(data))))
            self.handler.response_headers_list.append('Content-Length')
        self.handler.write(data)

    def write_multipart(self, data):
        # Multipart responses do their own framing; no Content-Length here.
        self.handler.write(data)

    def start_response(self, *args, **kwargs):
        self.handler.start_response(*args, **kwargs)
class XHRPollingTransport(BaseTransport):
    """Long-polling transport: each HTTP request carries one payload."""

    def handle_options_response(self):
        """Reply to a CORS pre-flight request.

        Header values are written as strings (not ints) because PEP 3333
        requires native-string header values.
        """
        self.start_response("200 OK", [
            ("Access-Control-Allow-Origin", "*"),
            ("Access-Control-Allow-Credentials", "true"),
            ("Access-Control-Allow-Methods", "POST, GET, OPTIONS"),
            ("Access-Control-Max-Age", "3600"),
            ("Connection", "close"),
            ("Content-Length", "0")
        ])
        self.write('')
        return []

    def handle_get_response(self, session):
        """Block up to 5s for a message, then answer the poll."""
        try:
            message = session.get_client_msg(timeout=5.0)
            message = self.encode(message)
        except Empty:
            # Nothing queued: send an empty body so the client re-polls.
            message = ""
        self.start_response("200 OK", [
            ("Access-Control-Allow-Origin", "*"),
            ("Connection", "close"),
            self.content_type,
        ])
        self.write_packed(message)
        return []

    def handle_post_response(self, session):
        """Receive a client-posted message and acknowledge with "ok"."""
        data = self.handler.wsgi_input.readline().replace("data=", "")
        messages = self.decode(data)
        session.put_server_msg(messages)
        self.start_response("200 OK", [
            ("Access-Control-Allow-Origin", "*"),
            ("Access-Control-Allow-Credentials", "true"),
            ("Connection", "close"),
            ("Content-Type", "text/plain; charset=UTF-8"),
            ("Content-Length", "2"),
        ])
        self.write("ok")
        return []

    def connect(self, session, request_method):
        """Dispatch one polling request according to its HTTP method."""
        if session.is_new():
            # First contact: hand the client its session id.
            session_id = self.encode(session.session_id)
            self.start_response("200 OK", [
                ("Access-Control-Allow-Origin", "*"),
                ("Access-Control-Allow-Credentials", "true"),
                ("Connection", "close"),
                self.content_type,
            ])
            self.write_packed(session_id)
            return []
        elif request_method == "GET":
            session.clear_disconnect_timeout()
            return self.handle_get_response(session)
        elif request_method == "POST":
            return self.handle_post_response(session)
        elif request_method == "OPTIONS":
            return self.handle_options_response()
        else:
            raise Exception("No support for such method: " + request_method)
class HTMLFileTransport(XHRPollingTransport):
    """Not tested at all!"""

    def __init__(self, handler):
        super(HTMLFileTransport, self).__init__(handler)
        self.content_type = ("Content-Type", "text/html")

    def write_packed(self, data):
        # Deliver the payload via a <script> tag the browser executes.
        self.write("<script>parent.s._('%s', document);</script>" % data)

    def handle_get_response(self, session):
        headers = [
            ("Connection", "keep-alive"),
            ("Content-Type", "text/html"),
            ("Transfer-Encoding", "chunked"),
        ]
        self.start_response("200 OK", headers)
        # Pad the initial document so browsers start rendering immediately.
        self.write("<html><body>" + " " * 244)
        try:
            packed = self.encode(session.get_client_msg(timeout=5.0))
        except Empty:
            packed = ""
        self.write_packed(packed)
class JSONPolling(XHRPollingTransport):
    """XHR polling variant that wraps payloads in a JSONP callback."""

    def __init__(self, handler):
        super(JSONPolling, self).__init__(handler)
        self.content_type = ("Content-Type", "text/javascript; charset=UTF-8")

    def write_packed(self, data):
        payload = "io.JSONP[0]._('%s');" % data
        self.write(payload)
class XHRMultipartTransport(XHRPollingTransport):
    # Streaming variant of XHR polling: one GET keeps the connection open
    # and messages are pushed as multipart/x-mixed-replace parts.

    def connect(self, session, request_method):
        """Dispatch the request; GET also starts the heartbeat greenlet."""
        if request_method == "GET":
            heartbeat = self.handler.environ['socketio'].start_heartbeat()
            response = self.handle_get_response(session)
            return [heartbeat] + response
        elif request_method == "POST":
            return self.handle_post_response(session)
        elif request_method == "OPTIONS":
            return self.handle_options_response()
        else:
            raise Exception("No support for such method: " + request_method)

    def handle_get_response(self, session):
        """Stream session messages to the client as multipart parts.

        Returns a one-element list holding the greenlet that does the
        streaming, so the caller can join on it.
        """
        header = "Content-Type: text/plain; charset=UTF-8\r\n\r\n"
        self.start_response("200 OK", [
            ("Access-Control-Allow-Origin", "*"),
            ("Access-Control-Allow-Credentials", "true"),
            ("Connection", "keep-alive"),
            ("Content-Type", "multipart/x-mixed-replace;boundary=\"socketio\""),
        ])
        # First part carries the session id.
        self.write_multipart("--socketio\r\n")
        self.write_multipart(header)
        self.write_multipart(self.encode(session.session_id) + "\r\n")
        self.write_multipart("--socketio\r\n")

        def send_part():
            # Runs in its own greenlet: blocks on the session queue and
            # pushes each message as a new multipart part.
            while True:
                message = session.get_client_msg()
                if message is None:
                    # None is the sentinel for "session closed".
                    session.kill()
                    break
                else:
                    message = self.encode(message)
                    try:
                        self.write_multipart(header)
                        self.write_multipart(message)
                        self.write_multipart("--socketio\r\n")
                    except socket.error:
                        # Client went away mid-write.
                        session.kill()
                        break

        return [gevent.spawn(send_part)]
class WebsocketTransport(BaseTransport):
    # Full-duplex transport over a websocket: one greenlet per direction
    # plus the heartbeat greenlet.

    def connect(self, session, request_method):
        """Start the reader/writer greenlets for this websocket session."""
        websocket = self.handler.environ['wsgi.websocket']
        # Handshake: the first frame sent is the session id.
        websocket.send(self.encode(session.session_id))

        def send_into_ws():
            # Server -> client: drain the session queue into the socket.
            while True:
                message = session.get_client_msg()
                if message is None:
                    # None is the sentinel for "session closed".
                    session.kill()
                    break
                websocket.send(self.encode(message))

        def read_from_ws():
            # Client -> server: decode incoming frames onto the queue.
            while True:
                message = websocket.wait()
                if message is None:
                    session.kill()
                    break
                else:
                    session.put_server_msg(self.decode(message))

        gr1 = gevent.spawn(send_into_ws)
        gr2 = gevent.spawn(read_from_ws)
        heartbeat = self.handler.environ['socketio'].start_heartbeat()
        return [gr1, gr2, heartbeat]
class FlashSocketTransport(WebsocketTransport):
    """Flash fallback; server side is identical to the websocket transport."""
    pass
| |
# Copyright 2014 eBay Software Foundation
# Copyright [2015] Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import Mock, patch, PropertyMock
from trove.backup.models import Backup
from trove.instance.tasks import InstanceTasks
from trove.taskmanager.manager import Manager
from trove.taskmanager import models
from trove.taskmanager import service
from trove.common.exception import TroveError, ReplicationSlaveAttachError
from proboscis.asserts import assert_equal
from trove.tests.unittests import trove_testtools
class TestManager(trove_testtools.TestCase):
    """Unit tests for trove.taskmanager.manager.Manager.

    Covers replication failover (promote/eject/detach), replica and
    instance creation, and cluster task dispatch.  Instances are Mocks;
    ``models.BuiltInstanceTasks.load`` is patched with ``side_effect``
    lists, so the *order* of loads in the Manager code under test is
    load-bearing for these tests.
    """

    def setUp(self):
        super(TestManager, self).setUp()
        self.manager = Manager()
        self.context = trove_testtools.TroveTestContext(self)
        # ids/slaves are set through PropertyMock on the Mock's type so
        # plain attribute access returns the fixed values.
        self.mock_slave1 = Mock()
        self.mock_slave2 = Mock()
        type(self.mock_slave1).id = PropertyMock(return_value='some-inst-id')
        type(self.mock_slave2).id = PropertyMock(return_value='inst1')
        self.mock_old_master = Mock()
        type(self.mock_old_master).slaves = PropertyMock(
            return_value=[self.mock_slave1, self.mock_slave2])
        self.mock_master = Mock()
        type(self.mock_master).slaves = PropertyMock(
            return_value=[self.mock_slave1, self.mock_slave2])

    def tearDown(self):
        super(TestManager, self).tearDown()
        self.manager = None

    def test_getattr_lookup(self):
        # Manager resolves unknown task methods dynamically via __getattr__.
        self.assertTrue(callable(self.manager.delete_cluster))
        self.assertTrue(callable(self.manager.mongodb_add_shard_cluster))

    def test_most_current_replica(self):
        master = Mock()
        master.id = 32

        def test_case(txn_list, selected_master):
            # txn_list entries are [replica_id, master_txn_id, txn_count].
            with patch.object(self.manager, '_get_replica_txns',
                              return_value=txn_list):
                result = self.manager._most_current_replica(master, None)
                assert_equal(result, selected_master)

        # Replicas reporting different master txn ids must be rejected.
        with self.assertRaisesRegexp(TroveError,
                                     'not all replicating from same'):
            test_case([['a', '2a99e-32bf', 2], ['b', '2a', 1]], None)
        test_case([['a', '2a99e-32bf', 2]], 'a')
        test_case([['a', '2a', 1], ['b', '2a', 2]], 'b')
        test_case([['a', '2a', 2], ['b', '2a', 1]], 'a')
        # Ties resolve to the first replica.
        test_case([['a', '2a', 1], ['b', '2a', 1]], 'a')
        test_case([['a', None, 0]], 'a')
        test_case([['a', None, 0], ['b', '2a', 1]], 'b')

    def test_detach_replica(self):
        slave = Mock()
        master = Mock()
        with patch.object(models.BuiltInstanceTasks, 'load',
                          side_effect=[slave, master]):
            self.manager.detach_replica(self.context, 'some-inst-id')
            slave.detach_replica.assert_called_with(master)

    @patch.object(Manager, '_set_task_status')
    def test_promote_to_replica_source(self, mock_set_task_status):
        # Load order: promoted slave, old master, remaining slave.
        with patch.object(models.BuiltInstanceTasks, 'load',
                          side_effect=[self.mock_slave1,
                                       self.mock_old_master,
                                       self.mock_slave2]):
            self.manager.promote_to_replica_source(
                self.context, 'some-inst-id')
            self.mock_slave1.detach_replica.assert_called_with(
                self.mock_old_master, for_failover=True)
            self.mock_old_master.attach_replica.assert_called_with(
                self.mock_slave1)
            self.mock_slave1.make_read_only.assert_called_with(False)
            self.mock_slave2.detach_replica.assert_called_with(
                self.mock_old_master, for_failover=True)
            self.mock_slave2.attach_replica.assert_called_with(self.mock_slave1)
            self.mock_old_master.demote_replication_master.assert_any_call()
            mock_set_task_status.assert_called_with(([self.mock_old_master] +
                                                     [self.mock_slave1,
                                                      self.mock_slave2]),
                                                    InstanceTasks.NONE)

    @patch.object(Manager, '_set_task_status')
    @patch.object(Manager, '_most_current_replica')
    def test_eject_replica_source(self, mock_most_current_replica,
                                  mock_set_task_status):
        with patch.object(models.BuiltInstanceTasks, 'load',
                          side_effect=[self.mock_master, self.mock_slave1,
                                       self.mock_slave2]):
            self.manager.eject_replica_source(self.context, 'some-inst-id')
            mock_most_current_replica.assert_called_with(self.mock_master,
                                                         [self.mock_slave1,
                                                          self.mock_slave2])
            mock_set_task_status.assert_called_with(([self.mock_master] +
                                                     [self.mock_slave1,
                                                      self.mock_slave2]),
                                                    InstanceTasks.NONE)

    @patch.object(Manager, '_set_task_status')
    @patch('trove.taskmanager.manager.LOG')
    def test_exception_TroveError_promote_to_replica_source(self, *args):
        # A TroveError during detach must surface as
        # ReplicationSlaveAttachError.
        self.mock_slave2.detach_replica = Mock(side_effect=TroveError)
        with patch.object(models.BuiltInstanceTasks, 'load',
                          side_effect=[self.mock_slave1, self.mock_old_master,
                                       self.mock_slave2]):
            self.assertRaises(ReplicationSlaveAttachError,
                              self.manager.promote_to_replica_source,
                              self.context, 'some-inst-id')

    @patch.object(Manager, '_set_task_status')
    @patch.object(Manager, '_most_current_replica')
    @patch('trove.taskmanager.manager.LOG')
    def test_exception_TroveError_eject_replica_source(
            self, mock_logging, mock_most_current_replica,
            mock_set_tast_status):
        self.mock_slave2.detach_replica = Mock(side_effect=TroveError)
        mock_most_current_replica.return_value = self.mock_slave1
        with patch.object(models.BuiltInstanceTasks, 'load',
                          side_effect=[self.mock_master, self.mock_slave1,
                                       self.mock_slave2]):
            self.assertRaises(ReplicationSlaveAttachError,
                              self.manager.eject_replica_source,
                              self.context, 'some-inst-id')

    @patch.object(Manager, '_set_task_status')
    def test_error_promote_to_replica_source(self, *args):
        # Non-Trove errors must propagate unchanged.
        self.mock_slave2.detach_replica = Mock(
            side_effect=RuntimeError('Error'))
        with patch.object(models.BuiltInstanceTasks, 'load',
                          side_effect=[self.mock_slave1, self.mock_old_master,
                                       self.mock_slave2]):
            self.assertRaisesRegexp(RuntimeError, 'Error',
                                    self.manager.promote_to_replica_source,
                                    self.context, 'some-inst-id')

    @patch('trove.taskmanager.manager.LOG')
    def test_error_demote_replication_master_promote_to_replica_source(
            self, mock_logging):
        self.mock_old_master.demote_replication_master = Mock(
            side_effect=RuntimeError('Error'))
        with patch.object(models.BuiltInstanceTasks, 'load',
                          side_effect=[self.mock_slave1, self.mock_old_master,
                                       self.mock_slave2]):
            self.assertRaises(ReplicationSlaveAttachError,
                              self.manager.promote_to_replica_source,
                              self.context, 'some-inst-id')

    @patch.object(Manager, '_set_task_status')
    @patch.object(Manager, '_most_current_replica')
    def test_error_eject_replica_source(self, mock_most_current_replica,
                                        mock_set_tast_status):
        self.mock_slave2.detach_replica = Mock(
            side_effect=RuntimeError('Error'))
        mock_most_current_replica.return_value = self.mock_slave1
        with patch.object(models.BuiltInstanceTasks, 'load',
                          side_effect=[self.mock_master, self.mock_slave1,
                                       self.mock_slave2]):
            self.assertRaisesRegexp(RuntimeError, 'Error',
                                    self.manager.eject_replica_source,
                                    self.context, 'some-inst-id')

    @patch.object(Backup, 'delete')
    def test_create_replication_slave(self, mock_backup_delete):
        mock_tasks = Mock()
        mock_snapshot = {'dataset': {'snapshot_id': 'test-id'}}
        mock_tasks.get_replication_master_snapshot = Mock(
            return_value=mock_snapshot)
        mock_flavor = Mock()
        with patch.object(models.FreshInstanceTasks, 'load',
                          return_value=mock_tasks):
            # Passing a master id makes create_instance take the replica
            # path, which snapshots the master first.
            self.manager.create_instance(self.context, ['id1'], Mock(),
                                         mock_flavor, Mock(), None, None,
                                         'mysql', 'mysql-server', 2,
                                         'temp-backup-id', None,
                                         'some_password', None, Mock(),
                                         'some-master-id', None, None,
                                         None)
            mock_tasks.get_replication_master_snapshot.assert_called_with(
                self.context, 'some-master-id', mock_flavor, 'temp-backup-id',
                replica_number=1)
            # The temporary snapshot backup is cleaned up afterwards.
            mock_backup_delete.assert_called_with(self.context, 'test-id')

    @patch.object(models.FreshInstanceTasks, 'load')
    @patch.object(Backup, 'delete')
    @patch('trove.taskmanager.manager.LOG')
    def test_exception_create_replication_slave(self, mock_logging,
                                                mock_delete, mock_load):
        mock_load.return_value.create_instance = Mock(side_effect=TroveError)
        self.assertRaises(TroveError, self.manager.create_instance,
                          self.context, ['id1', 'id2'], Mock(), Mock(),
                          Mock(), None, None, 'mysql', 'mysql-server', 2,
                          'temp-backup-id', None, 'some_password', None,
                          Mock(), 'some-master-id', None, None, None)

    def test_AttributeError_create_instance(self):
        # Multiple ids without a replica source are rejected.
        self.assertRaisesRegexp(
            AttributeError, 'Cannot create multiple non-replica instances.',
            self.manager.create_instance, self.context, ['id1', 'id2'],
            Mock(), Mock(), Mock(), None, None, 'mysql', 'mysql-server', 2,
            'temp-backup-id', None, 'some_password', None, Mock(), None, None,
            None, None)

    def test_create_instance(self):
        mock_tasks = Mock()
        mock_flavor = Mock()
        mock_override = Mock()
        with patch.object(models.FreshInstanceTasks, 'load',
                          return_value=mock_tasks):
            self.manager.create_instance(self.context, 'id1', 'inst1',
                                         mock_flavor, 'mysql-image-id', None,
                                         None, 'mysql', 'mysql-server', 2,
                                         'temp-backup-id', None, 'password',
                                         None, mock_override, None, None, None,
                                         None)
            mock_tasks.create_instance.assert_called_with(mock_flavor,
                                                          'mysql-image-id', None,
                                                          None, 'mysql',
                                                          'mysql-server', 2,
                                                          'temp-backup-id', None,
                                                          'password', None,
                                                          mock_override,
                                                          None, None, None, None)
            # 36000s is the usage timeout waited on after creation.
            mock_tasks.wait_for_instance.assert_called_with(36000, mock_flavor)

    def test_create_cluster(self):
        mock_tasks = Mock()
        with patch.object(models, 'load_cluster_tasks',
                          return_value=mock_tasks):
            self.manager.create_cluster(self.context, 'some-cluster-id')
            mock_tasks.create_cluster.assert_called_with(self.context,
                                                         'some-cluster-id')

    def test_delete_cluster(self):
        mock_tasks = Mock()
        with patch.object(models, 'load_cluster_tasks',
                          return_value=mock_tasks):
            self.manager.delete_cluster(self.context, 'some-cluster-id')
            mock_tasks.delete_cluster.assert_called_with(self.context,
                                                         'some-cluster-id')
class TestTaskManagerService(trove_testtools.TestCase):
    """Tests for the taskmanager WSGI service factory."""

    def test_app_factory(self):
        app = service.app_factory(Mock())
        self.assertIsInstance(app, service.TaskService)
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Ironic SeaMicro interfaces.
Provides basic power control of servers in SeaMicro chassis via
python-seamicroclient.
Provides vendor passthru methods for SeaMicro specific functionality.
"""
import os
import re
from oslo_config import cfg
from oslo_utils import importutils
from six.moves.urllib import parse as urlparse
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LW
from ironic.common import states
from ironic.conductor import task_manager
from ironic.drivers import base
from ironic.drivers.modules import console_utils
from ironic.openstack.common import log as logging
from ironic.openstack.common import loopingcall
# seamicroclient is an optional dependency: import it lazily so this
# module can still be imported when the package is absent.
seamicroclient = importutils.try_import('seamicroclient')
if seamicroclient:
    from seamicroclient import client as seamicro_client
    from seamicroclient import exceptions as seamicro_client_exception

# Driver tunables, registered under the [seamicro] config group below.
opts = [
    cfg.IntOpt('max_retry',
               default=3,
               help='Maximum retries for SeaMicro operations'),
    cfg.IntOpt('action_timeout',
               default=10,
               help='Seconds to wait for power action to be completed')
]

CONF = cfg.CONF
opt_group = cfg.OptGroup(name='seamicro',
                         title='Options for the seamicro power driver')
CONF.register_group(opt_group)
CONF.register_opts(opts, opt_group)

LOG = logging.getLogger(__name__)

# Mapping from ironic logical boot devices to SeaMicro device names.
_BOOT_DEVICES_MAP = {
    boot_devices.DISK: 'hd0',
    boot_devices.PXE: 'pxe',
}

# driver_info keys every node using this driver must provide.
REQUIRED_PROPERTIES = {
    'seamicro_api_endpoint': _("API endpoint. Required."),
    'seamicro_password': _("password. Required."),
    'seamicro_server_id': _("server ID. Required."),
    'seamicro_username': _("username. Required."),
}
OPTIONAL_PROPERTIES = {
    'seamicro_api_version': _("version of SeaMicro API client; default is 2. "
                              "Optional.")
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
CONSOLE_PROPERTIES = {
    'seamicro_terminal_port': _("node's UDP port to connect to. "
                                "Only required for console access.")
}

# Base UDP port for serial console access; see get_telnet_port().
PORT_BASE = 2000
def _get_client(*args, **kwargs):
    """Creates the python-seamicro_client

    :param kwargs: A dict of keyword arguments to be passed to the method,
                   which should contain: 'username', 'password',
                   'auth_url', 'api_version' parameters.
    :returns: SeaMicro API client.
    :raises: InvalidParameterValue if the requested API version is not
        supported by the client library.
    """
    client_kwargs = {
        'username': kwargs['username'],
        'password': kwargs['password'],
        'auth_url': kwargs['api_endpoint'],
    }
    try:
        return seamicro_client.Client(kwargs['api_version'], **client_kwargs)
    except seamicro_client_exception.UnsupportedVersion as e:
        raise exception.InvalidParameterValue(_(
            "Invalid 'seamicro_api_version' parameter. Reason: %s.") % e)
def _parse_driver_info(node):
    """Parses and creates seamicro driver info

    :param node: An Ironic node object.
    :returns: SeaMicro driver info.
    :raises: MissingParameterValue if any required parameters are missing.
    :raises: InvalidParameterValue if required parameter are invalid.
    """
    info = node.driver_info or {}
    # Every REQUIRED_PROPERTIES key must be present and non-empty.
    missing_info = [key for key in REQUIRED_PROPERTIES if not info.get(key)]
    if missing_info:
        raise exception.MissingParameterValue(_(
            "SeaMicro driver requires the following parameters to be set in"
            " node's driver_info: %s.") % missing_info)
    api_endpoint = info.get('seamicro_api_endpoint')
    username = info.get('seamicro_username')
    password = info.get('seamicro_password')
    server_id = info.get('seamicro_server_id')
    # API version defaults to "2" (see OPTIONAL_PROPERTIES).
    api_version = info.get('seamicro_api_version', "2")
    # Optional console port; validated only when supplied.
    port = info.get('seamicro_terminal_port')
    if port:
        try:
            port = int(port)
        except ValueError:
            raise exception.InvalidParameterValue(_(
                "SeaMicro terminal port is not an integer."))
    # server_id must look like "<int>/<int>".
    r = re.compile(r"(^[0-9]+)/([0-9]+$)")
    if not r.match(server_id):
        raise exception.InvalidParameterValue(_(
            "Invalid 'seamicro_server_id' parameter in node's "
            "driver_info. Expected format of 'seamicro_server_id' "
            "is <int>/<int>"))
    # NOTE(review): only plain http endpoints pass this check; https is
    # rejected -- confirm that is intentional.
    url = urlparse.urlparse(api_endpoint)
    if (not (url.scheme == "http") or not url.netloc):
        raise exception.InvalidParameterValue(_(
            "Invalid 'seamicro_api_endpoint' parameter in node's "
            "driver_info."))
    res = {'username': username,
           'password': password,
           'api_endpoint': api_endpoint,
           'server_id': server_id,
           'api_version': api_version,
           'uuid': node.uuid,
           'port': port}
    return res
def _get_server(driver_info):
    """Get server from server_id."""
    client = _get_client(**driver_info)
    return client.servers.get(driver_info['server_id'])
def _get_volume(driver_info, volume_id):
    """Get volume from volume_id."""
    client = _get_client(**driver_info)
    return client.volumes.get(volume_id)
def _get_power_status(node):
    """Get current power state of this node

    :param node: Ironic node one of :class:`ironic.db.models.Node`
    :raises: InvalidParameterValue if a seamicro parameter is invalid.
    :raises: MissingParameterValue if required seamicro parameters are
        missing.
    :raises: ServiceUnavailable on an error from SeaMicro Client.
    :returns: Power state of the given node
    """
    seamicro_info = _parse_driver_info(node)
    try:
        server = _get_server(seamicro_info)
        # 'active' may be absent or None; report ERROR in that case
        # rather than guessing a power state.
        if not hasattr(server, 'active') or server.active is None:
            return states.ERROR
        if not server.active:
            return states.POWER_OFF
        elif server.active:
            return states.POWER_ON
    except seamicro_client_exception.NotFound:
        raise exception.NodeNotFound(node=node.uuid)
    except seamicro_client_exception.ClientException as ex:
        # Any other client error is surfaced as ServiceUnavailable.
        LOG.error(_LE("SeaMicro client exception %(msg)s for node %(uuid)s"),
                  {'msg': ex.message, 'uuid': node.uuid})
        raise exception.ServiceUnavailable(message=ex.message)
def _power_on(node, timeout=None):
    """Power ON this node

    :param node: An Ironic node object.
    :param timeout: Time in seconds to wait till power on is complete.
    :raises: InvalidParameterValue if a seamicro parameter is invalid.
    :raises: MissingParameterValue if required seamicro parameters are
        missing.
    :returns: Power state of the given node.
    """
    if timeout is None:
        timeout = CONF.seamicro.action_timeout
    # Single-element lists so the polling closure can mutate them.
    state = [None]
    retries = [0]
    seamicro_info = _parse_driver_info(node)
    server = _get_server(seamicro_info)

    def _wait_for_power_on(state, retries):
        """Called at an interval until the node is powered on."""
        state[0] = _get_power_status(node)
        if state[0] == states.POWER_ON:
            raise loopingcall.LoopingCallDone()
        # Give up (state ERROR) once the retry budget is exhausted.
        if retries[0] > CONF.seamicro.max_retry:
            state[0] = states.ERROR
            raise loopingcall.LoopingCallDone()
        try:
            retries[0] += 1
            server.power_on()
        except seamicro_client_exception.ClientException:
            # Log and keep retrying on the next tick.
            LOG.warning(_LW("Power-on failed for node %s."),
                        node.uuid)

    timer = loopingcall.FixedIntervalLoopingCall(_wait_for_power_on,
                                                 state, retries)
    timer.start(interval=timeout).wait()
    return state[0]
def _power_off(node, timeout=None):
    """Power OFF this node

    :param node: Ironic node one of :class:`ironic.db.models.Node`
    :param timeout: Time in seconds to wait till power off is compelete
    :raises: InvalidParameterValue if a seamicro parameter is invalid.
    :raises: MissingParameterValue if required seamicro parameters are
        missing.
    :returns: Power state of the given node
    """
    if timeout is None:
        timeout = CONF.seamicro.action_timeout
    # Mutable cells shared with the polling closure below.
    state = [None]
    retries = [0]
    server = _get_server(_parse_driver_info(node))

    def _poll_power_off(state, retries):
        """Called at an interval until the node is powered off."""
        state[0] = _get_power_status(node)
        if state[0] == states.POWER_OFF:
            raise loopingcall.LoopingCallDone()
        if retries[0] > CONF.seamicro.max_retry:
            state[0] = states.ERROR
            raise loopingcall.LoopingCallDone()
        try:
            retries[0] += 1
            server.power_off()
        except seamicro_client_exception.ClientException:
            LOG.warning(_LW("Power-off failed for node %s."),
                        node.uuid)

    poller = loopingcall.FixedIntervalLoopingCall(_poll_power_off,
                                                  state, retries)
    poller.start(interval=timeout).wait()
    return state[0]
def _reboot(node, timeout=None):
    """Reboot this node.

    :param node: Ironic node one of :class:`ironic.db.models.Node`
    :param timeout: Time in seconds to wait till reboot is compelete
    :raises: InvalidParameterValue if a seamicro parameter is invalid.
    :raises: MissingParameterValue if required seamicro parameters are
        missing.
    :returns: Power state of the given node
    """
    if timeout is None:
        timeout = CONF.seamicro.action_timeout
    # Single-element lists so the polling closure can mutate them.
    state = [None]
    retries = [0]
    seamicro_info = _parse_driver_info(node)
    server = _get_server(seamicro_info)

    def _wait_for_reboot(state, retries):
        """Called at an interval until the node is rebooted successfully."""
        state[0] = _get_power_status(node)
        if state[0] == states.POWER_ON:
            raise loopingcall.LoopingCallDone()
        if retries[0] > CONF.seamicro.max_retry:
            state[0] = states.ERROR
            raise loopingcall.LoopingCallDone()
        try:
            retries[0] += 1
            server.reset()
        except seamicro_client_exception.ClientException:
            LOG.warning(_LW("Reboot failed for node %s."),
                        node.uuid)

    timer = loopingcall.FixedIntervalLoopingCall(_wait_for_reboot,
                                                 state, retries)
    # The reset is issued only inside the guarded polling closure,
    # mirroring _power_on/_power_off.  The previous extra, unguarded
    # server.reset() here triggered a double reset and let a
    # ClientException escape uncaught.
    timer.start(interval=timeout).wait()
    return state[0]
def _validate_volume(driver_info, volume_id):
    """Validates if volume is in Storage pools designated for ironic."""
    volume = _get_volume(driver_info, volume_id)
    # Ironic-managed volumes follow the
    # <scard>/ironic-<pool_id>/<volume_id> naming scheme.
    parts = volume.id.split('/')
    pool_id = parts[1].lower() if len(parts) > 1 else ""
    if "ironic-" in pool_id:
        return True
    raise exception.InvalidParameterValue(_(
        "Invalid volume id specified"))
def _get_pools(driver_info, filters=None):
    """Get SeaMicro storage pools matching given filters."""
    return _get_client(**driver_info).pools.list(filters=filters)
def _create_volume(driver_info, volume_size):
    """Create volume in the SeaMicro storage pools designated for ironic.

    :param driver_info: parsed node driver info (see _parse_driver_info).
    :param volume_size: size of the volume to create.
    :returns: the newly created volume.
    :raises: VendorPassthruException if no ironic storage pool is found.
    """
    ironic_pools = _get_pools(driver_info, filters={'id': 'ironic-'})
    # Guard against an empty result as well as None: indexing the sorted
    # list below would otherwise raise IndexError instead of the intended
    # VendorPassthruException.
    if not ironic_pools:
        raise exception.VendorPassthruException(_(
            "No storage pools found for ironic"))
    # NOTE(review): the ascending sort picks the pool with the *smallest*
    # freeSize -- confirm that this selection is intended.
    least_used_pool = sorted(ironic_pools,
                             key=lambda x: x.freeSize)[0]
    return _get_client(**driver_info).volumes.create(volume_size,
                                                     least_used_pool)
def get_telnet_port(driver_info):
    """Get SeaMicro telnet port to listen."""
    # server_id has the form "<server>/<index>"; the console port is
    # derived from the first component.
    server_num = int(driver_info['server_id'].split("/")[0])
    return PORT_BASE + 10 * server_num
class Power(base.PowerInterface):
    """SeaMicro Power Interface.

    Provides a mechanism for controlling the power state of servers
    housed in a SeaMicro chassis.
    """

    def get_properties(self):
        return COMMON_PROPERTIES

    def validate(self, task):
        """Check that the node's 'driver_info' is valid.

        :param task: a TaskManager instance containing the node to act on.
        :raises: MissingParameterValue if required seamicro parameters are
            missing.
        """
        _parse_driver_info(task.node)

    def get_power_state(self, task):
        """Return the node's current power state, polled from the chassis.

        :param task: a TaskManager instance containing the node to act on.
        :raises: ServiceUnavailable on an error from SeaMicro Client.
        :raises: InvalidParameterValue if a seamicro parameter is invalid.
        :raises: MissingParameterValue when a required parameter is missing
        :returns: power state. One of :class:`ironic.common.states`.
        """
        return _get_power_status(task.node)

    @task_manager.require_exclusive_lock
    def set_power_state(self, task, pstate):
        """Turn the node's power on or off.

        :param task: a TaskManager instance containing the node to act on.
        :param pstate: Either POWER_ON or POWER_OFF from :class:
            `ironic.common.states`.
        :raises: InvalidParameterValue if an invalid power state was
            specified or a seamicro parameter is invalid.
        :raises: MissingParameterValue when a required parameter is missing
        :raises: PowerStateFailure if the desired power state couldn't be
            set.
        """
        # Dispatch on the requested state; anything else is invalid.
        handlers = {states.POWER_ON: _power_on,
                    states.POWER_OFF: _power_off}
        if pstate not in handlers:
            raise exception.InvalidParameterValue(_(
                "set_power_state called with invalid power state."))

        reached = handlers[pstate](task.node)
        if reached != pstate:
            raise exception.PowerStateFailure(pstate=pstate)

    @task_manager.require_exclusive_lock
    def reboot(self, task):
        """Power-cycle the node; fail unless it comes back POWER_ON.

        :param task: a TaskManager instance containing the node to act on.
        :raises: InvalidParameterValue if a seamicro parameter is invalid.
        :raises: MissingParameterValue if required seamicro parameters are
            missing.
        :raises: PowerStateFailure if the final state of the node is not
            POWER_ON.
        """
        if _reboot(task.node) != states.POWER_ON:
            raise exception.PowerStateFailure(pstate=states.POWER_ON)
class VendorPassthru(base.VendorInterface):
    """SeaMicro vendor-specific methods."""

    def get_properties(self):
        return COMMON_PROPERTIES

    def validate(self, task, method, **kwargs):
        _parse_driver_info(task.node)

    @base.passthru(['POST'])
    def set_node_vlan_id(self, task, **kwargs):
        """Assign an untagged vlan id to NIC 0 of the node.

        @kwargs vlan_id: id of untagged vlan for NIC 0 of node
        """
        vlan_id = kwargs.get('vlan_id')
        if not vlan_id:
            raise exception.MissingParameterValue(_("No vlan id provided"))

        node = task.node
        seamicro_info = _parse_driver_info(node)
        try:
            server = _get_server(seamicro_info)

            # Drop any untagged vlan currently assigned to NIC 0 before
            # applying the new one; refresh to pick up the change.
            if len(server.nic['0']['untaggedVlan']) > 0:
                server.unset_untagged_vlan(server.nic['0']['untaggedVlan'])
            server = server.refresh(5)
            server.set_untagged_vlan(vlan_id)
        except seamicro_client_exception.ClientException as ex:
            LOG.error(_LE("SeaMicro client exception: %s"), ex.message)
            raise exception.VendorPassthruException(message=ex.message)

        # Record the applied vlan id on the node for later reference.
        properties = node.properties
        properties['seamicro_vlan_id'] = vlan_id
        node.properties = properties
        node.save()

    @base.passthru(['POST'])
    def attach_volume(self, task, **kwargs):
        """Attach a volume from the ironic storage pools to the node.

        If kwargs['volume_id'] is not given, a new volume of
        kwargs['volume_size'] is created in a SeaMicro storage pool and
        attached as the node's root volume.

        @kwargs volume_id: id of pre-provisioned volume that is to be
            attached as root volume of node
        @kwargs volume_size: size of new volume to be created and attached
            as root volume of node
        """
        node = task.node
        seamicro_info = _parse_driver_info(node)

        volume_id = kwargs.get('volume_id')
        if volume_id is None:
            volume_size = kwargs.get('volume_size')
            if volume_size is None:
                raise exception.MissingParameterValue(
                    _("No volume size provided for creating volume"))
            volume_id = _create_volume(seamicro_info, volume_size)

        # _validate_volume returns True or raises InvalidParameterValue.
        if _validate_volume(seamicro_info, volume_id):
            try:
                server = _get_server(seamicro_info)
                server.detach_volume()
                server = server.refresh(5)
                server.attach_volume(volume_id)
            except seamicro_client_exception.ClientException as ex:
                LOG.error(_LE("SeaMicro client exception: %s"), ex.message)
                raise exception.VendorPassthruException(message=ex.message)

            # Record the attached volume id on the node.
            properties = node.properties
            properties['seamicro_volume_id'] = volume_id
            node.properties = properties
            node.save()
class Management(base.ManagementInterface):

    def get_properties(self):
        return COMMON_PROPERTIES

    def validate(self, task):
        """Check that 'driver_info' contains SeaMicro credentials.

        Validates whether the 'driver_info' property of the supplied
        task's node contains the required credentials information.

        :param task: a task from TaskManager.
        :raises: MissingParameterValue when a required parameter is missing
        """
        _parse_driver_info(task.node)

    def get_supported_boot_devices(self):
        """Get a list of the supported boot devices.

        :returns: A list with the supported boot devices defined
            in :mod:`ironic.common.boot_devices`.
        """
        return list(_BOOT_DEVICES_MAP)

    @task_manager.require_exclusive_lock
    def set_boot_device(self, task, device, persistent=False):
        """Set the boot device to use on next reboot of the node.

        :param task: a task from TaskManager.
        :param device: the boot device, one of
            :mod:`ironic.common.boot_devices`.
        :param persistent: Boolean value. True if the boot device will
            persist to all future boots, False if not.
            Default: False. Ignored by this driver.
        :raises: InvalidParameterValue if an invalid boot device is
            specified or if a seamicro parameter is invalid.
        :raises: IronicException on an error from seamicro-client.
        :raises: MissingParameterValue when a required parameter is missing
        """
        if device not in self.get_supported_boot_devices():
            raise exception.InvalidParameterValue(_(
                "Invalid boot device %s specified.") % device)

        seamicro_info = _parse_driver_info(task.node)
        try:
            server = _get_server(seamicro_info)
            server.set_boot_order(_BOOT_DEVICES_MAP[device])
        except seamicro_client_exception.ClientException as ex:
            LOG.error(_LE("Seamicro set boot device failed for node "
                          "%(node)s with the following error: %(error)s"),
                      {'node': task.node.uuid, 'error': ex.message})
            raise exception.IronicException(message=ex.message)

    def get_boot_device(self, task):
        """Get the current boot device for the task's node.

        Returns the current boot device of the node. Be aware that not
        all drivers support this.

        :param task: a task from TaskManager.
        :returns: a dictionary containing:
            :boot_device: the boot device, one of
                :mod:`ironic.common.boot_devices` or None if it is unknown.
            :persistent: Whether the boot device will persist to all
                future boots or not, None if it is unknown.
        """
        # TODO(lucasagomes): The python-seamicroclient library currently
        # doesn't expose a method to get the boot device, update it once
        # it's implemented.
        return {'boot_device': None, 'persistent': None}

    def get_sensors_data(self, task):
        """Get sensors data method.

        Not implemented by this driver.

        :param task: a TaskManager instance.
        """
        raise NotImplementedError()
class ShellinaboxConsole(base.ConsoleInterface):
    """A ConsoleInterface that uses telnet and shellinabox."""

    def get_properties(self):
        properties = COMMON_PROPERTIES.copy()
        properties.update(CONSOLE_PROPERTIES)
        return properties

    def validate(self, task):
        """Validate the Node console info.

        :param task: a task from TaskManager.
        :raises: MissingParameterValue if required seamicro parameters are
            missing
        :raises: InvalidParameterValue if required parameter are invalid.
        """
        driver_info = _parse_driver_info(task.node)
        if not driver_info['port']:
            raise exception.MissingParameterValue(_(
                "Missing 'seamicro_terminal_port' parameter in node's "
                "driver_info"))

    def start_console(self, task):
        """Start a remote shellinabox console for the node.

        :param task: a task from TaskManager
        :raises: MissingParameterValue if required seamicro parameters are
            missing
        :raises: ConsoleError if the directory for the PID file cannot be
            created
        :raises: ConsoleSubprocessFailed when invoking the subprocess failed
        :raises: InvalidParameterValue if required parameter are invalid.
        """
        driver_info = _parse_driver_info(task.node)
        telnet_port = get_telnet_port(driver_info)
        # The chassis address is the host portion of the API endpoint.
        chassis_ip = urlparse.urlparse(driver_info['api_endpoint']).netloc

        seamicro_cmd = ("/:%(uid)s:%(gid)s:HOME:telnet %(chassis)s %(port)s"
                        % {'uid': os.getuid(),
                           'gid': os.getgid(),
                           'chassis': chassis_ip,
                           'port': telnet_port})

        console_utils.start_shellinabox_console(driver_info['uuid'],
                                                driver_info['port'],
                                                seamicro_cmd)

    def stop_console(self, task):
        """Stop the remote console session for the node.

        :param task: a task from TaskManager
        :raises: MissingParameterValue if required seamicro parameters are
            missing
        :raises: ConsoleError if unable to stop the console
        :raises: InvalidParameterValue if required parameter are invalid.
        """
        driver_info = _parse_driver_info(task.node)
        console_utils.stop_shellinabox_console(driver_info['uuid'])

    def get_console(self, task):
        """Get the type and connection information about the console.

        :raises: MissingParameterValue if required seamicro parameters are
            missing
        :raises: InvalidParameterValue if required parameter are invalid.
        """
        driver_info = _parse_driver_info(task.node)
        url = console_utils.get_shellinabox_console_url(driver_info['port'])
        return {'type': 'shellinabox', 'url': url}
| |
from benchmark.fortune_html_parser import FortuneHTMLParser
from setup.linux import setup_util
from benchmark.test_types import *
import importlib
import os
import subprocess
import time
import re
from pprint import pprint
import sys
import traceback
import json
import logging
import csv
import shlex
import math
from collections import OrderedDict
from requests import ConnectionError
from threading import Thread
from threading import Event
from utils import header
# Cross-platform colored text
from colorama import Fore, Back, Style
from datetime import datetime
from datetime import timedelta
class FrameworkTest:
    # Common wrk header arguments shared by every generated load script;
    # {accept} is filled with the test type's Accept header.
    headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"

    # Used for test types that require no pipelining or query string params.
    # The generated bash script primes and warms up the server, syncs the
    # clock with ntpdate, then benchmarks each concurrency level in
    # {levels}, printing STARTTIME/ENDTIME epoch markers that __parse_test
    # later correlates with the collected resource-usage stats.
    concurrency_template = """
echo ""
echo "---------------------------------------------------------"
echo " Running Primer {name}"
echo " {wrk} {headers} --latency -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}\""
echo "---------------------------------------------------------"
echo ""
{wrk} {headers} --latency -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Running Warmup {name}"
echo " {wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
echo "---------------------------------------------------------"
echo ""
{wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Synchronizing time"
echo "---------------------------------------------------------"
echo ""
ntpdate -s pool.ntp.org
for c in {levels}
do
echo ""
echo "---------------------------------------------------------"
echo " Concurrency: $c for {name}"
echo " {wrk} {headers} --latency -d {duration} -c $c --timeout $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\""
echo "---------------------------------------------------------"
echo ""
STARTTIME=$(date +"%s")
{wrk} {headers} --latency -d {duration} -c $c --timeout $c -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url}
echo "STARTTIME $STARTTIME"
echo "ENDTIME $(date +"%s")"
sleep 2
done
"""

    # Used for test types that require pipelining.
    # Same shape as concurrency_template, but the benchmark step loads
    # ~/pipeline.lua with a pipeline depth of {pipeline}.
    pipeline_template = """
echo ""
echo "---------------------------------------------------------"
echo " Running Primer {name}"
echo " {wrk} {headers} --latency -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}\""
echo "---------------------------------------------------------"
echo ""
{wrk} {headers} --latency -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Running Warmup {name}"
echo " {wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
echo "---------------------------------------------------------"
echo ""
{wrk} {headers} --latency -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Synchronizing time"
echo "---------------------------------------------------------"
echo ""
ntpdate -s pool.ntp.org
for c in {levels}
do
echo ""
echo "---------------------------------------------------------"
echo " Concurrency: $c for {name}"
echo " {wrk} {headers} --latency -d {duration} -c $c --timeout $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\" -s ~/pipeline.lua -- {pipeline}"
echo "---------------------------------------------------------"
echo ""
STARTTIME=$(date +"%s")
{wrk} {headers} --latency -d {duration} -c $c --timeout $c -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url} -s ~/pipeline.lua -- {pipeline}
echo "STARTTIME $STARTTIME"
echo "ENDTIME $(date +"%s")"
sleep 2
done
"""

    # Used for test types that require a database -
    # These tests run at a static concurrency level and vary the size of
    # the query sent with each request.
    # Note: the prime/warmup steps append a literal '2' to {url} (a query
    # count of 2); the benchmark loop appends the $c level instead. This
    # template hardcodes the `wrk` binary rather than taking {wrk}.
    query_template = """
echo ""
echo "---------------------------------------------------------"
echo " Running Primer {name}"
echo " wrk {headers} --latency -d 5 -c 8 --timeout 8 -t 8 \"http://{server_host}:{port}{url}2\""
echo "---------------------------------------------------------"
echo ""
wrk {headers} --latency -d 5 -c 8 --timeout 8 -t 8 "http://{server_host}:{port}{url}2"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Running Warmup {name}"
echo " wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}2\""
echo "---------------------------------------------------------"
echo ""
wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}2"
sleep 5
echo ""
echo "---------------------------------------------------------"
echo " Synchronizing time"
echo "---------------------------------------------------------"
echo ""
ntpdate -s pool.ntp.org
for c in {levels}
do
echo ""
echo "---------------------------------------------------------"
echo " Queries: $c for {name}"
echo " wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}$c\""
echo "---------------------------------------------------------"
echo ""
STARTTIME=$(date +"%s")
wrk {headers} --latency -d {duration} -c {max_concurrency} --timeout {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}$c"
echo "STARTTIME $STARTTIME"
echo "ENDTIME $(date +"%s")"
sleep 2
done
"""
############################################################
# start(benchmarker)
# Start the test using it's setup file
############################################################
def start(self, out, err):
# Setup environment variables
logDir = os.path.join(self.fwroot, self.benchmarker.latest_results_directory, 'logs', self.name.lower())
bash_functions_path= os.path.join(self.fwroot, 'toolset/setup/linux/bash_functions.sh')
setup_util.replace_environ(config='$FWROOT/config/benchmark_profile',
command='''\
export TROOT=%s && \
export IROOT=%s && \
export DBHOST=%s && \
export LOGDIR=%s && \
export MAX_THREADS=%s && \
export MAX_CONCURRENCY=%s \
''' % (
self.directory,
self.install_root,
self.database_host,
logDir,
self.benchmarker.threads,
max(self.benchmarker.concurrency_levels)))
# Always ensure that IROOT belongs to the runner_user
chown = "sudo chown -R %s:%s %s" % (self.benchmarker.runner_user,
self.benchmarker.runner_user, os.path.join(self.fwroot, self.install_root))
subprocess.check_call(chown, shell=True, cwd=self.fwroot, executable='/bin/bash')
# Run the module start inside parent of TROOT
# - we use the parent as a historical accident, a number of tests
# refer to their TROOT maually still
previousDir = os.getcwd()
os.chdir(os.path.dirname(self.troot))
logging.info("Running setup module start (cwd=%s)", self.directory)
# Run the start script for the test as the "testrunner" user
#
# `sudo` - Switching user requires superuser privs
# -u [username] The username
# -E Preserves the current environment variables
# -H Forces the home var (~) to be reset to the user specified
# `stdbuf` - Disable buffering, send output to python ASAP
# -o0 zero-sized buffer for stdout
# -e0 zero-sized buffer for stderr
# `bash` - Run the setup.sh script using bash
# -e Force bash to exit on first error
# -x Turn on bash tracing e.g. print commands before running
#
# Most servers do not output to stdout/stderr while serving
# requests so there is no performance hit from disabling
# output buffering. This disabling is necessary to
# a) allow TFB to show output in real time and b) avoid loosing
# output in the buffer when the testrunner processes are forcibly
# killed
#
# See http://www.pixelbeat.org/programming/stdio_buffering/
# See https://blogs.gnome.org/markmc/2013/06/04/async-io-and-python/
# See http://eyalarubas.com/python-subproc-nonblock.html
command = 'sudo -u %s -E -H stdbuf -o0 -e0 bash -exc "source %s && source %s.sh"' % (
self.benchmarker.runner_user,
bash_functions_path,
os.path.join(self.troot, self.setup_file))
debug_command = '''\
export FWROOT=%s && \\
export TROOT=%s && \\
export IROOT=%s && \\
export DBHOST=%s && \\
export LOGDIR=%s && \\
export MAX_THREADS=%s && \\
export MAX_CONCURRENCY=%s && \\
cd %s && \\
%s''' % (self.fwroot,
self.directory,
self.install_root,
self.database_host,
logDir,
self.benchmarker.threads,
self.directory,
max(self.benchmarker.concurrency_levels),
command)
logging.info("To run %s manually, copy/paste this:\n%s", self.name, debug_command)
def tee_output(prefix, line):
# Needs to be one atomic write
# Explicitly use UTF-8 as it's the most common framework output
# TODO improve encoding handling
line = prefix.encode('utf-8') + line
# Log to current terminal
sys.stdout.write(line)
sys.stdout.flush()
# logging.error("".join([prefix, line]))
out.write(line)
out.flush()
# Start the setup.sh command
p = subprocess.Popen(command, cwd=self.directory,
shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
nbsr = setup_util.NonBlockingStreamReader(p.stdout,
"%s: %s.sh and framework processes have terminated" % (self.name, self.setup_file))
# Set a limit on total execution time of setup.sh
timeout = datetime.now() + timedelta(minutes = 105)
time_remaining = timeout - datetime.now()
# Need to print to stdout once every 10 minutes or Travis-CI will abort
travis_timeout = datetime.now() + timedelta(minutes = 5)
# Flush output until setup.sh work is finished. This is
# either a) when setup.sh exits b) when the port is bound
# c) when we run out of time. Note that 'finished' doesn't
# guarantee setup.sh process is dead - the OS may choose to make
# setup.sh a zombie process if it still has living children
#
# Note: child processes forked (using &) will remain alive
# after setup.sh has exited. The will have inherited the
# stdout/stderr descriptors and will be directing their
# output to the pipes.
#
prefix = "Setup %s: " % self.name
while (p.poll() is None
and not self.benchmarker.is_port_bound(self.port)
and not time_remaining.total_seconds() < 0):
# The conditions above are slow to check, so
# we will delay output substantially if we only
# print one line per condition check.
# Adding a tight loop here mitigates the effect,
# ensuring that most of the output directly from
# setup.sh is sent to tee_output before the outer
# loop exits and prints things like "setup.sh exited"
#
for i in xrange(10):
try:
line = nbsr.readline(0.05)
if line:
tee_output(prefix, line)
# Reset Travis-CI timer
travis_timeout = datetime.now() + timedelta(minutes = 5)
except setup_util.EndOfStream:
tee_output(prefix, "Setup has terminated\n")
break
time_remaining = timeout - datetime.now()
if (travis_timeout - datetime.now()).total_seconds() < 0:
sys.stdout.write(prefix + 'Printing so Travis-CI does not time out\n')
sys.stdout.write(prefix + "Status: Poll: %s, Port %s bound: %s, Time Left: %s\n" % (
p.poll(), self.port, self.benchmarker.is_port_bound(self.port), time_remaining))
sys.stdout.flush()
travis_timeout = datetime.now() + timedelta(minutes = 5)
# Did we time out?
if time_remaining.total_seconds() < 0:
tee_output(prefix, "%s.sh timed out!! Aborting...\n" % self.setup_file)
p.kill()
return 1
# What's our return code?
# If setup.sh has terminated, use that code
# Otherwise, detect if the port was bound
tee_output(prefix, "Status: Poll: %s, Port %s bound: %s, Time Left: %s\n" % (
p.poll(), self.port, self.benchmarker.is_port_bound(self.port), time_remaining))
retcode = (p.poll() if p.poll() is not None else 0 if self.benchmarker.is_port_bound(self.port) else 1)
if p.poll() is not None:
tee_output(prefix, "%s.sh process exited naturally with %s\n" % (self.setup_file, p.poll()))
elif self.benchmarker.is_port_bound(self.port):
tee_output(prefix, "Bound port detected on %s\n" % self.port)
# Before we return control to the benchmarker, spin up a
# thread to keep an eye on the pipes in case the running
# framework uses stdout/stderr. Once all processes accessing
# the subprocess.PIPEs are dead, this thread will terminate.
# Use a different prefix to indicate this is the framework
# speaking
prefix = "Server %s: " % self.name
def watch_child_pipes(nbsr, prefix):
while True:
try:
line = nbsr.readline(60)
if line:
tee_output(prefix, line)
except setup_util.EndOfStream:
tee_output(prefix, "Framework processes have terminated\n")
return
watch_thread = Thread(target = watch_child_pipes,
args = (nbsr, prefix))
watch_thread.daemon = True
watch_thread.start()
logging.info("Executed %s.sh, returning %s", self.setup_file, retcode)
os.chdir(previousDir)
return retcode
############################################################
# End start
############################################################
############################################################
# verify_urls
# Verifys each of the URLs for this test. THis will sinply
# curl the URL and check for it's return status.
# For each url, a flag will be set on this object for whether
# or not it passed
# Returns True if all verifications succeeded
############################################################
def verify_urls(self, out, err):
result = True
def verify_type(test_type):
test = self.runTests[test_type]
test.setup_out_err(out, err)
out.write(header("VERIFYING %s" % test_type.upper()))
base_url = "http://%s:%s" % (self.benchmarker.server_host, self.port)
try:
results = test.verify(base_url)
except ConnectionError as e:
results = [('fail',"Server did not respond to request")]
logging.warning("Verifying test %s for %s caused an exception: %s", test_type, self.name, e)
except Exception as e:
results = [('fail',"""Caused Exception in TFB
This almost certainly means your return value is incorrect,
but also that you have found a bug. Please submit an issue
including this message: %s\n%s""" % (e, traceback.format_exc()),
base_url)]
logging.warning("Verifying test %s for %s caused an exception: %s", test_type, self.name, e)
traceback.format_exc()
test.failed = any(result == 'fail' for (result, reason, url) in results)
test.warned = any(result == 'warn' for (result, reason, url) in results)
test.passed = all(result == 'pass' for (result, reason, url) in results)
def output_result(result, reason, url):
specific_rules_url = "http://frameworkbenchmarks.readthedocs.org/en/latest/Project-Information/Framework-Tests/#specific-test-requirements"
color = Fore.GREEN
if result.upper() == "WARN":
color = Fore.YELLOW
elif result.upper() == "FAIL":
color = Fore.RED
out.write((" " + color + "%s" + Style.RESET_ALL + " for %s\n") % (result.upper(), url))
print (" " + color + "%s" + Style.RESET_ALL + " for %s\n") % (result.upper(), url)
if reason is not None and len(reason) != 0:
for line in reason.splitlines():
out.write(" " + line + '\n')
print " " + line
if not test.passed:
out.write(" See %s\n" % specific_rules_url)
print " See %s\n" % specific_rules_url
[output_result(r1,r2,url) for (r1, r2, url) in results]
if test.failed:
self.benchmarker.report_verify_results(self, test_type, 'fail')
elif test.warned:
self.benchmarker.report_verify_results(self, test_type, 'warn')
elif test.passed:
self.benchmarker.report_verify_results(self, test_type, 'pass')
else:
raise Exception("Unknown error - test did not pass,warn,or fail")
result = True
for test_type in self.runTests:
verify_type(test_type)
if self.runTests[test_type].failed:
result = False
return result
############################################################
# End verify_urls
############################################################
############################################################
# benchmark
# Runs the benchmark for each type of test that it implements
# JSON/DB/Query.
############################################################
    def benchmark(self, out, err):
        """Run the benchmark for each test type this framework implements.

        For every type in self.runTests: generate the remote wrk script,
        run it over SSH while collecting resource-usage stats, then parse
        and report the results.
        """
        def benchmark_type(test_type):
            out.write("BENCHMARKING %s ... " % test_type.upper())

            test = self.runTests[test_type]
            test.setup_out_err(out, err)
            output_file = self.benchmarker.output_file(self.name, test_type)
            if not os.path.exists(output_file):
                # Open to create the empty file
                with open(output_file, 'w'):
                    pass

            # A test type that failed verification is skipped, but its
            # (empty) raw file is still parsed and reported below.
            if not test.failed:
                if test_type == 'plaintext': # One special case
                    remote_script = self.__generate_pipeline_script(test.get_url(), self.port, test.accept_header)
                elif test_type == 'query' or test_type == 'update':
                    remote_script = self.__generate_query_script(test.get_url(), self.port, test.accept_header)
                else:
                    remote_script = self.__generate_concurrency_script(test.get_url(), self.port, test.accept_header)

                # Begin resource usage metrics collection
                self.__begin_logging(test_type)

                # Run the benchmark: feed the generated script to a shell
                # on the client machine via ssh, capturing stdout raw.
                with open(output_file, 'w') as raw_file:
                    p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "), stdin=subprocess.PIPE, stdout=raw_file, stderr=err)
                    p.communicate(remote_script)
                    err.flush()

                # End resource usage metrics collection
                self.__end_logging()

            results = self.__parse_test(test_type)
            print "Benchmark results:"
            pprint(results)

            self.benchmarker.report_benchmark_results(framework=self, test=test_type, results=results['results'])
            out.write( "Complete\n" )
            out.flush()

        for test_type in self.runTests:
            benchmark_type(test_type)
############################################################
# End benchmark
############################################################
############################################################
# parse_all
# Method meant to be run for a given timestamp
############################################################
def parse_all(self):
for test_type in self.runTests:
if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)):
results = self.__parse_test(test_type)
self.benchmarker.report_benchmark_results(framework=self, test=test_type, results=results['results'])
##########################################################################################
# Private Methods
##########################################################################################
############################################################
# __parse_test(test_type)
############################################################
def __parse_test(self, test_type):
try:
results = dict()
results['results'] = []
stats = []
if os.path.exists(self.benchmarker.get_output_file(self.name, test_type)):
with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
is_warmup = True
rawData = None
for line in raw_data:
if "Queries:" in line or "Concurrency:" in line:
is_warmup = False
rawData = None
continue
if "Warmup" in line or "Primer" in line:
is_warmup = True
continue
if not is_warmup:
if rawData == None:
rawData = dict()
results['results'].append(rawData)
#if "Requests/sec:" in line:
# m = re.search("Requests/sec:\s+([0-9]+)", line)
# rawData['reportedResults'] = m.group(1)
# search for weighttp data such as succeeded and failed.
if "Latency" in line:
m = re.findall("([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
if len(m) == 4:
rawData['latencyAvg'] = m[0]
rawData['latencyStdev'] = m[1]
rawData['latencyMax'] = m[2]
# rawData['latencyStdevPercent'] = m[3]
#if "Req/Sec" in line:
# m = re.findall("([0-9]+\.*[0-9]*[k|%]*)", line)
# if len(m) == 4:
# rawData['requestsAvg'] = m[0]
# rawData['requestsStdev'] = m[1]
# rawData['requestsMax'] = m[2]
# rawData['requestsStdevPercent'] = m[3]
#if "requests in" in line:
# m = re.search("requests in ([0-9]+\.*[0-9]*[ms|s|m|h]+)", line)
# if m != None:
# # parse out the raw time, which may be in minutes or seconds
# raw_time = m.group(1)
# if "ms" in raw_time:
# rawData['total_time'] = float(raw_time[:len(raw_time)-2]) / 1000.0
# elif "s" in raw_time:
# rawData['total_time'] = float(raw_time[:len(raw_time)-1])
# elif "m" in raw_time:
# rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 60.0
# elif "h" in raw_time:
# rawData['total_time'] = float(raw_time[:len(raw_time)-1]) * 3600.0
if "requests in" in line:
m = re.search("([0-9]+) requests in", line)
if m != None:
rawData['totalRequests'] = int(m.group(1))
if "Socket errors" in line:
if "connect" in line:
m = re.search("connect ([0-9]+)", line)
rawData['connect'] = int(m.group(1))
if "read" in line:
m = re.search("read ([0-9]+)", line)
rawData['read'] = int(m.group(1))
if "write" in line:
m = re.search("write ([0-9]+)", line)
rawData['write'] = int(m.group(1))
if "timeout" in line:
m = re.search("timeout ([0-9]+)", line)
rawData['timeout'] = int(m.group(1))
if "Non-2xx" in line:
m = re.search("Non-2xx or 3xx responses: ([0-9]+)", line)
if m != None:
rawData['5xx'] = int(m.group(1))
if "STARTTIME" in line:
m = re.search("[0-9]+", line)
rawData["startTime"] = int(m.group(0))
if "ENDTIME" in line:
m = re.search("[0-9]+", line)
rawData["endTime"] = int(m.group(0))
test_stats = self.__parse_stats(test_type, rawData["startTime"], rawData["endTime"], 1)
# rawData["averageStats"] = self.__calculate_average_stats(test_stats)
stats.append(test_stats)
with open(self.benchmarker.stats_file(self.name, test_type) + ".json", "w") as stats_file:
json.dump(stats, stats_file, indent=2)
return results
except IOError:
return None
############################################################
# End benchmark
############################################################
############################################################
# __generate_concurrency_script(url, port)
# Generates the string containing the bash script that will
# be run on the client to benchmark a single test. This
# specifically works for the variable concurrency tests (JSON
# and DB)
############################################################
def __generate_concurrency_script(self, url, port, accept_header, wrk_command="wrk"):
headers = self.headers_template.format(accept=accept_header)
return self.concurrency_template.format(max_concurrency=max(self.benchmarker.concurrency_levels),
max_threads=self.benchmarker.threads, name=self.name, duration=self.benchmarker.duration,
levels=" ".join("{}".format(item) for item in self.benchmarker.concurrency_levels),
server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command)
############################################################
# __generate_pipeline_script(url, port)
# Generates the string containing the bash script that will
# be run on the client to benchmark a single pipeline test.
############################################################
def __generate_pipeline_script(self, url, port, accept_header, wrk_command="wrk"):
    """Build the bash script run on the client for one pipeline test.
    Unlike the concurrency script, the levels and pipeline depth are fixed."""
    fixed_levels = [256, 1024, 4096, 16384]
    accept = self.headers_template.format(accept=accept_header)
    return self.pipeline_template.format(
        max_concurrency=16384,
        max_threads=self.benchmarker.threads,
        name=self.name,
        duration=self.benchmarker.duration,
        levels=" ".join("{}".format(lvl) for lvl in fixed_levels),
        server_host=self.benchmarker.server_host,
        port=port,
        url=url,
        headers=accept,
        wrk=wrk_command,
        pipeline=16)
############################################################
# __generate_query_script(url, port)
# Generates the string containing the bash script that will
# be run on the client to benchmark a single test. This
# specifically works for the variable query tests (Query)
############################################################
def __generate_query_script(self, url, port, accept_header):
    """Build the bash script run on the client for one variable-query test
    (Query). Mirrors the concurrency script but iterates query levels."""
    bench = self.benchmarker
    accept = self.headers_template.format(accept=accept_header)
    query_level_list = " ".join("{}".format(lvl) for lvl in bench.query_levels)
    return self.query_template.format(
        max_concurrency=max(bench.concurrency_levels),
        max_threads=bench.threads,
        name=self.name,
        duration=bench.duration,
        levels=query_level_list,
        server_host=bench.server_host,
        port=port,
        url=url,
        headers=accept)
############################################################
# Returns True if any test type this this framework test will use a DB
############################################################
def requires_database(self):
    '''Returns True/False if this test requires a database'''
    # True as soon as any configured test type declares it needs a DB.
    for _, test_obj in self.runTests.iteritems():
        if test_obj.requires_db:
            return True
    return False
############################################################
# __begin_logging
# Starts a thread to monitor the resource usage, to be synced with the client's time
# TODO: MySQL and InnoDB are possible. Figure out how to implement them.
############################################################
def __begin_logging(self, test_type):
    """Start a dstat child process logging resource usage to a CSV file.

    The output path comes from the benchmarker so the samples can later be
    synced with the client's timing data.  The Popen handle is stored on
    ``self`` so __end_logging can terminate it.

    Args:
      test_type: name of the test type being benchmarked (used in the
        stats file name).
    """
    output_file = "{file_name}".format(
        file_name=self.benchmarker.get_stats_file(self.name, test_type))
    # BUG FIX: the original command listed `--raw --socket --tcp` twice,
    # which makes dstat emit duplicate column groups and confuses the
    # downstream CSV parsing; each plugin is now listed once.
    dstat_string = "dstat -afilmprsT --aio --fs --ipc --lock --raw --socket --tcp \
--udp --unix --vm --disk-util \
--rpc --rpcd --output {output_file}".format(output_file=output_file)
    cmd = shlex.split(dstat_string)
    # Discard dstat's console output; only the CSV file matters.
    dev_null = open(os.devnull, "w")
    self.subprocess_handle = subprocess.Popen(cmd, stdout=dev_null)
##############################################################
# Begin __end_logging
# Stops the logger thread and blocks until shutdown is complete.
##############################################################
def __end_logging(self):
    """Stop the dstat logger started by __begin_logging.

    terminate() asks the child process to exit; communicate() then blocks
    until it has fully shut down and its pipes are drained.
    """
    self.subprocess_handle.terminate()
    self.subprocess_handle.communicate()
##############################################################
# Begin __parse_stats
# For each test type, process all the statistics, and return a multi-layered dictionary
# that has a structure as follows:
# (timestamp)
# | (main header) - group that the stat is in
# | | (sub header) - title of the stat
# | | | (stat) - the stat itself, usually a floating point number
##############################################################
def __parse_stats(self, test_type, start_time, end_time, interval):
    """Parse the dstat CSV for one test type into a nested dict.

    Returns a dict keyed by timestamp (float epoch); each value maps a
    main header (stat group) to a dict of {sub header: float value}.
    Only rows whose epoch falls within [start_time, end_time] are kept,
    down-sampled to every `interval`-th row.
    """
    stats_dict = dict()
    stats_file = self.benchmarker.stats_file(self.name, test_type)
    with open(stats_file) as stats:
        # dstat doesn't output a completely compliant CSV file - we need
        # to strip the preamble up to the first blank line.
        while(stats.next() != "\n"):
            pass
        stats_reader = csv.reader(stats)
        main_header = stats_reader.next()
        sub_header = stats_reader.next()
        # Column index of the timestamp ("epoch") field.
        time_row = sub_header.index("epoch")
        int_counter = 0
        for row in stats_reader:
            time = float(row[time_row])
            int_counter+=1
            if time < start_time:
                continue
            elif time > end_time:
                # Past the measurement window; later rows are irrelevant.
                return stats_dict
            if int_counter % interval != 0:
                # Down-sample: keep only every `interval`-th row.
                continue
            row_dict = dict()
            for nextheader in main_header:
                if nextheader != "":
                    row_dict[nextheader] = dict()
            header = ""
            for item_num, column in enumerate(row):
                # main_header names only the first column of each group;
                # carry the last non-empty name forward across the group.
                if(len(main_header[item_num]) != 0):
                    header = main_header[item_num]
                # all the stats are numbers, so we want to make sure that
                # they stay that way in json
                row_dict[header][sub_header[item_num]] = float(column)
            stats_dict[time] = row_dict
    return stats_dict
##############################################################
# End __parse_stats
##############################################################
def __getattr__(self, name):
    """For backwards compatibility, we used to pass benchmarker
    as the argument to the setup.sh files"""
    # Only invoked when normal attribute lookup fails, so this transparently
    # forwards unknown attribute reads to the shared benchmarker object.
    try:
        x = getattr(self.benchmarker, name)
    except AttributeError:
        print "AttributeError: %s not a member of FrameworkTest or Benchmarker" % name
        print "This is probably a bug"
        raise
    return x
##############################################################
# Begin __calculate_average_stats
# We have a large amount of raw data for the statistics that
# may be useful for the stats nerds, but most people care about
# a couple of numbers. For now, we're only going to supply:
# * Average CPU
# * Average Memory
# * Total network use
# * Total disk use
# More may be added in the future. If they are, please update
# the above list.
# Note: raw_stats is directly from the __parse_stats method.
# Recall that this consists of a dictionary of timestamps,
# each of which contain a dictionary of stat categories which
# contain a dictionary of stats
##############################################################
def __calculate_average_stats(self, raw_stats):
    """Condense the raw dstat statistics into a few headline numbers.

    We have a large amount of raw data that may be useful for the stats
    nerds, but most people care about a couple of numbers.  Supplied:
      * Average CPU (percent busy)
      * Average memory used
      * Total network use as {'receive': ..., 'send': ...}
      * Total disk use as {'read': ..., 'write': ...}

    Args:
      raw_stats: output of __parse_stats - a dict of timestamps, each
        mapping stat categories to dicts of individual float stats.

    Returns:
      dict mapping each stat category to its display-formatted summary.
    """
    raw_stat_collection = dict()

    for timestamp, time_dict in raw_stats.items():
        for main_header, sub_headers in time_dict.items():
            item_to_append = None
            if 'cpu' in main_header:
                # We want the time the CPU is NOT idle, i.e. 100 - idle.
                # (BUG FIX: the original computed `idl - 100.0`, which is
                # the negated value.)
                item_to_append = 100.0 - sub_headers['idl']
            elif main_header == 'memory usage':
                item_to_append = sub_headers['used']
            elif 'net' in main_header:
                # Network stats have two parts - receive and send; stored
                # as a (receive, send) tuple.
                item_to_append = (sub_headers['recv'], sub_headers['send'])
            elif 'dsk' in main_header or 'io' in main_header:
                # BUG FIX: the original condition `'dsk' or 'io' in
                # main_header` was always true ('dsk' is truthy); test both
                # substrings explicitly.  Tuple is (read, write).
                item_to_append = (sub_headers['read'], sub_headers['writ'])

            if item_to_append is not None:
                if main_header not in raw_stat_collection:
                    raw_stat_collection[main_header] = list()
                raw_stat_collection[main_header].append(item_to_append)

    # Simple function to determine human readable size
    # http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
    def sizeof_fmt(num):
        # We'll assume that any number we get is convertable to a float
        num = float(num)
        for x in ['bytes', 'KB', 'MB', 'GB']:
            if num < 1024.0 and num > -1024.0:
                return "%3.1f%s" % (num, x)
            num /= 1024.0
        return "%3.1f%s" % (num, 'TB')

    # Format the raw stats for display.  math.fsum gives a float-accurate
    # sum (the built-in sum is not precise enough for many small floats).
    display_stat_collection = dict()
    for header, values in raw_stat_collection.items():
        display_stat = None
        if 'cpu' in header:
            display_stat = sizeof_fmt(math.fsum(values) / len(values))
        elif header == 'memory usage':
            # BUG FIX: the original compared the stale outer loop variable
            # `main_header` here (and below) instead of `header`.
            display_stat = sizeof_fmt(math.fsum(values) / len(values))
        elif 'net' in header:
            receive, send = zip(*values)  # unzip
            display_stat = {'receive': sizeof_fmt(math.fsum(receive)),
                            'send': sizeof_fmt(math.fsum(send))}
        else:  # 'dsk' or 'io' in header
            read, write = zip(*values)  # unzip
            display_stat = {'read': sizeof_fmt(math.fsum(read)),
                            'write': sizeof_fmt(math.fsum(write))}
        display_stat_collection[header] = display_stat

    # BUG FIX: the original returned `display_stat` (only the last
    # category processed) instead of the full collection.
    return display_stat_collection
###########################################################################################
# End __calculate_average_stats
#########################################################################################
##########################################################################################
# Constructor
##########################################################################################
def __init__(self, name, directory, benchmarker, runTests, args):
    """Create a FrameworkTest.

    Args:
      name: test name (framework-prefixed for non-default tests)
      directory: the test's directory inside the repository
      benchmarker: the shared Benchmarker instance
      runTests: OrderedDict of {test type name: FrameworkTestType}
      args: remaining benchmark_config keys, copied onto self wholesale
    """
    self.name = name
    self.directory = directory
    self.benchmarker = benchmarker
    self.runTests = runTests
    self.fwroot = benchmarker.fwroot

    # Metadata fields; overwritten by the config args below where present.
    self.approach = ""
    self.classification = ""
    self.database = ""
    self.framework = ""
    self.language = ""
    self.orm = ""
    self.platform = ""
    self.webserver = ""
    self.os = ""
    self.database_os = ""
    self.display_name = ""
    self.notes = ""
    self.versus = ""

    # setup logging
    logging.basicConfig(stream=sys.stderr, level=logging.INFO)

    self.install_root = "%s/%s" % (self.fwroot, "installs")
    # BUG FIX: the original used `is 'pertest'`, which compares object
    # identity rather than string equality and only worked by accident of
    # CPython string interning.  Use == for a reliable comparison.
    if benchmarker.install_strategy == 'pertest':
        self.install_root = "%s/pertest/%s" % (self.install_root, name)

    # Used in setup.sh scripts for consistency with
    # the bash environment variables
    self.troot = self.directory
    self.iroot = self.install_root

    # By passing the entire set of keys, each FrameworkTest has a member
    # for each benchmark_config key.
    self.__dict__.update(args)
############################################################
# End __init__
############################################################
############################################################
# End FrameworkTest
############################################################
# Static methods
def test_order(type_name):
    """
    This sort ordering is set up specifically to return the length
    of the test name. There were SO many problems involved with
    'plaintext' being run first (rather, just not last) that we
    needed to ensure that it was run last for every framework.
    """
    # Longer names sort later; 'plaintext' is the longest type name.
    return len(type_name)
def validate_urls(test_name, test_keys):
    """
    Separated from validate_test because urls are not required anywhere. We know a url is incorrect if it is
    empty or does not start with a "/" character. There is no validation done to ensure the url conforms to
    the suggested url specifications, although those suggestions are presented if a url fails validation here.
    """
    example_urls = {
        "json_url": "/json",
        "db_url": "/mysql/db",
        "query_url": "/mysql/queries?queries= or /mysql/queries/",
        "fortune_url": "/mysql/fortunes",
        "update_url": "/mysql/updates?queries= or /mysql/updates/",
        "plaintext_url": "/plaintext"
    }
    url_keys = ("json_url", "db_url", "query_url",
                "fortune_url", "update_url", "plaintext_url")
    for test_url in url_keys:
        key_value = test_keys.get(test_url, None)
        # Absent urls are fine; present ones must start with '/'.
        if key_value is None or key_value.startswith('/'):
            continue
        errmsg = """`%s` field in test \"%s\" does not appear to be a valid url: \"%s\"\n
Example `%s` url: \"%s\"
""" % (test_url, test_name, key_value, test_url, example_urls[test_url])
        raise Exception(errmsg)
def validate_test(test_name, test_keys, directory):
    """
    Validate benchmark config values for this test based on a schema
    """
    # NOTE(review): `config` used below is not a parameter of this function;
    # it appears to be a module-level global holding the parsed
    # benchmark_config -- confirm against the rest of the file.
    # Ensure that each FrameworkTest has a framework property, inheriting from top-level if not
    if not test_keys['framework']:
        test_keys['framework'] = config['framework']

    # The parent directory name is conventionally the language name,
    # i.e. frameworks/<language>/<framework>.
    recommended_lang = directory.split('/')[-2]
    # The doubled %% keeps a literal % in the URL when this string is later
    # interpolated with the %-operator.
    windows_url = "https://github.com/TechEmpower/FrameworkBenchmarks/milestones/Windows%%20Compatibility"

    # Schema of required keys.  Each entry either restricts the value to an
    # `allowed` list of (value, description) pairs or supplies `help` text
    # shown when the key is missing or empty.
    schema = {
        'language': {
            # Language is reported as-is; only suggest the directory name.
            'help': ('language', 'The language of the framework used, suggestion: %s' % recommended_lang)
        },
        'webserver': {
            'help': ('webserver', 'Name of the webserver also referred to as the "front-end server"')
        },
        'classification': {
            'allowed': [
                ('Fullstack', '...'),
                ('Micro', '...'),
                ('Platform', '...')
            ]
        },
        'database': {
            'allowed': [
                ('MySQL', 'One of the most popular databases around the web and in TFB'),
                ('Postgres', 'An advanced SQL database with a larger feature set than MySQL'),
                ('MongoDB', 'A popular document-store database'),
                ('Cassandra', 'A highly performant and scalable NoSQL database'),
                ('Elasticsearch', 'A distributed RESTful search engine that is used as a database for TFB tests'),
                ('Redis', 'An open-sourced, BSD licensed, advanced key-value cache and store'),
                ('SQLite', 'A network-less database, still supported for backwards compatibility'),
                ('SQLServer', 'Microsoft\'s SQL implementation'),
                ('None', 'No database was used for these tests, as is the case with Json Serialization and Plaintext')
            ]
        },
        'approach': {
            'allowed': [
                ('Realistic', '...'),
                ('Stripped', '...')
            ]
        },
        'orm': {
            'allowed': [
                ('Full', 'Has a full suite of features like lazy loading, caching, multiple language support, sometimes pre-configured with scripts.'),
                ('Micro', 'Has basic database driver capabilities such as establishing a connection and sending queries.'),
                ('Raw', 'Tests that do not use an ORM will be classified as "raw" meaning they use the platform\'s raw database connectivity.')
            ]
        },
        'platform': {
            'help': ('platform', 'Name of the platform this framework runs on, e.g. Node.js, Pypy, hhvm, JRuby ...')
        },
        'framework': {
            # Guranteed to be here and correct at this point
            # key is left here to produce the set of required keys
        },
        'os': {
            'allowed': [
                ('Linux', 'Our best-supported host OS, it is recommended that you build your tests for Linux hosts'),
                ('Windows', 'TFB is not fully-compatible on windows, contribute towards our work on compatibility: %s' % windows_url)
            ]
        },
        'database_os': {
            'allowed': [
                ('Linux', 'Our best-supported host OS, it is recommended that you build your tests for Linux hosts'),
                ('Windows', 'TFB is not fully-compatible on windows, contribute towards our work on compatibility: %s' % windows_url)
            ]
        }
    }

    # Confirm required keys are present
    required_keys = schema.keys()
    missing = list(set(required_keys) - set(test_keys))
    if len(missing) > 0:
        missingstr = (", ").join(map(str, missing))
        raise Exception("benchmark_config.json for test %s is invalid, please amend by adding the following required keys: [%s]"
            % (test_name, missingstr))

    # Check the (all optional) test urls
    validate_urls(test_name, test_keys)

    # Check values of keys against schema
    for key in required_keys:
        # Comparison is case-insensitive: both the config value and the
        # allowed values are lowered before matching.
        val = test_keys.get(key, "").lower()
        has_predefined_acceptables = 'allowed' in schema[key]

        if has_predefined_acceptables:
            allowed = schema[key].get('allowed', [])
            acceptable_values, descriptors = zip(*allowed)
            acceptable_values = [a.lower() for a in acceptable_values]

            if val not in acceptable_values:
                msg = ("Invalid `%s` value specified for test \"%s\" in framework \"%s\"; suggestions:\n"
                    % (key, test_name, config['framework']))
                helpinfo = ('\n').join(["  `%s` -- %s" % (v, desc) for (v, desc) in zip(acceptable_values, descriptors)])
                fullerr = msg + helpinfo + "\n"
                raise Exception(fullerr)

        elif not has_predefined_acceptables and val == "":
            # Free-form keys must at least be non-empty; show the help text.
            msg = ("Value for `%s` in test \"%s\" in framework \"%s\" was missing:\n"
                % (key, test_name, config['framework']))
            helpinfo = "  %s -- %s" % schema[key]['help']
            fullerr = msg + helpinfo + '\n'
            raise Exception(fullerr)
def parse_config(config, directory, benchmarker):
    """
    Parses a config file into a list of FrameworkTest objects
    """
    parsed_tests = []

    # The config object can specify multiple tests; parse each in turn.
    for test_block in config['tests']:
        defined_names = [n for (n, _) in test_block.iteritems()]
        if "default" not in defined_names:
            logging.warn("Framework %s does not define a default test in benchmark_config.json", config['framework'])

        for test_name, test_keys in test_block.iteritems():
            # Validates the benchmark_config entry (raises on bad config).
            validate_test(test_name, test_keys, directory)

            # Map each supported test type to a parsed FrameworkTestType
            # object, e.g. JsonTestType consumes "json_url".
            type_map = dict()
            for type_name, type_obj in benchmarker.types.iteritems():
                try:
                    type_map[type_name] = type_obj.copy().parse(test_keys)
                except AttributeError:
                    # Quite common - most tests don't support all types;
                    # quietly skip the missing ones.
                    pass

            # Order test types so that e.g. 'plaintext' runs last
            # (see test_order for why).
            ordered_types = OrderedDict()
            for sorted_key in sorted(type_map.keys(), key=test_order):
                ordered_types[sorted_key] = type_map[sorted_key]

            # Prefix all test names with the framework except the 'default'
            # test; done at the end so validation errors above can still
            # refer to the primary test as `default`.
            if test_name == 'default':
                full_name = config['framework']
            else:
                full_name = "%s-%s" % (config['framework'], test_name)

            # By passing the entire set of keys, each FrameworkTest gets a
            # member for each key.
            parsed_tests.append(FrameworkTest(full_name, directory, benchmarker, ordered_types, test_keys))

    return parsed_tests
| |
# Copyright 2022 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer-based language models."""
from typing import Callable, Any, Optional
from flax import linen as nn
from flax import struct
import jax.numpy as jnp
import numpy as np
@struct.dataclass
class TransformerConfig:
    """Global hyperparameters used to minimize obnoxious kwarg plumbing."""
    # Size of the input token vocabulary (required).
    vocab_size: int
    # Size of the output vocabulary for the final dense projection (required).
    output_vocab_size: int
    # Computation dtype used throughout the model.
    dtype: Any = jnp.float32
    # Token embedding dimension.
    emb_dim: int = 512
    # Number of attention heads in each self-attention layer.
    num_heads: int = 8
    # Number of Encoder1DBlock layers.
    num_layers: int = 6
    # Total feature dimension of the attention query/key/value projections.
    qkv_dim: int = 512
    # Hidden dimension of the feed-forward (MLP) block.
    mlp_dim: int = 2048
    # Maximum sequence length supported by the position embeddings.
    max_len: int = 2048
    # Dropout rate applied outside the attention weights.
    dropout_rate: float = 0.3
    # Dropout rate applied to the attention weights themselves.
    attention_dropout_rate: float = 0.3
    # Initializers for dense-layer kernels and biases.
    kernel_init: Callable = nn.initializers.xavier_uniform()
    bias_init: Callable = nn.initializers.normal(stddev=1e-6)
    # If None, a fixed sinusoidal position embedding is used; otherwise this
    # initializer creates a learned position embedding parameter.
    posemb_init: Optional[Callable] = None
def sinusoidal_init(max_len=2048):
    """1D Sinusoidal Position Embedding Initializer.

    Args:
      max_len: maximum possible length for the input

    Returns:
      output: init function returning `(1, max_len, d_feature)`
    """

    def init(key, shape, dtype=np.float32):
        """Sinusoidal init."""
        # The table is deterministic: key and dtype are ignored.
        del key, dtype
        d_feature = shape[-1]
        positions = np.arange(0, max_len)[:, np.newaxis]
        # One angular frequency per sin/cos pair, geometrically spaced.
        angular_freqs = np.exp(
            -(np.log(10000.0) / d_feature) * np.arange(0, d_feature, 2))
        table = np.zeros((max_len, d_feature), dtype=np.float32)
        table[:, 0::2] = np.sin(positions * angular_freqs)
        table[:, 1::2] = np.cos(positions * angular_freqs)
        # Add a leading batch axis: [1, max_len, d_feature].
        return jnp.array(table[np.newaxis, :, :])

    return init
class AddPositionEmbs(nn.Module):
    """Adds (optionally learned) positional embeddings to the inputs.

    Attributes:
      config: TransformerConfig dataclass containing hyperparameters.
    """
    config: TransformerConfig

    @nn.compact
    def __call__(self, inputs):
        """Applies AddPositionEmbs module.

        Uses a fixed sinusoidal embedding table by default; supply
        `posemb_init` in the config to learn the embedding instead.

        Args:
          inputs: input data of shape `(batch_size, seq_len, emb_dim)`.

        Returns:
          output: `(bs, timesteps, in_dim)`
        """
        cfg = self.config
        assert inputs.ndim == 3, ('Number of dimensions should be 3,'
                                  ' but it is: %d' % inputs.ndim)
        seq_len = inputs.shape[1]
        emb_shape = (1, cfg.max_len, inputs.shape[-1])

        if cfg.posemb_init is None:
            # Fixed (non-learned) sinusoidal position embedding.
            pos_embedding = sinusoidal_init(max_len=cfg.max_len)(
                None, emb_shape, None)
        else:
            # Learned position embedding parameter.
            pos_embedding = self.param('pos_embedding',
                                       cfg.posemb_init,
                                       emb_shape)

        return inputs + pos_embedding[:, :seq_len, :]
class MlpBlock(nn.Module):
    """Transformer MLP / feed-forward block.

    Attributes:
      config: TransformerConfig dataclass containing hyperparameters.
      out_dim: optionally specify out dimension.
    """
    config: TransformerConfig
    out_dim: Optional[int] = None

    @nn.compact
    def __call__(self, inputs, deterministic=True):
        """Applies Transformer MlpBlock module."""
        cfg = self.config
        # Default the output width to the input width.
        out_features = self.out_dim if self.out_dim is not None else inputs.shape[-1]

        hidden = nn.Dense(cfg.mlp_dim,
                          dtype=cfg.dtype,
                          kernel_init=cfg.kernel_init,
                          bias_init=cfg.bias_init)(inputs)
        hidden = nn.elu(hidden)
        hidden = nn.Dropout(rate=cfg.dropout_rate)(
            hidden, deterministic=deterministic)
        projected = nn.Dense(out_features,
                             dtype=cfg.dtype,
                             kernel_init=cfg.kernel_init,
                             bias_init=cfg.bias_init)(hidden)
        return nn.Dropout(rate=cfg.dropout_rate)(
            projected, deterministic=deterministic)
class Encoder1DBlock(nn.Module):
    """Transformer encoder layer.

    Attributes:
      config: TransformerConfig dataclass containing hyperparameters.
    """
    config: TransformerConfig

    @nn.compact
    def __call__(self, inputs, deterministic):
        """Applies Encoder1DBlock module.

        Args:
          inputs: input data.
          deterministic: if true, dropout is disabled.

        Returns:
          output after transformer encoder block.
        """
        cfg = self.config
        assert inputs.ndim == 3

        # Self-attention sub-block with pre-LayerNorm and residual add.
        attn_in = nn.LayerNorm(dtype=cfg.dtype)(inputs)
        attn_out = nn.SelfAttention(
            num_heads=cfg.num_heads,
            dtype=cfg.dtype,
            qkv_features=cfg.qkv_dim,
            kernel_init=cfg.kernel_init,
            bias_init=cfg.bias_init,
            use_bias=False,
            broadcast_dropout=False,
            dropout_rate=cfg.attention_dropout_rate,
            deterministic=deterministic)(attn_in)
        attn_out = nn.Dropout(rate=cfg.dropout_rate)(
            attn_out, deterministic=deterministic)
        residual = attn_out + inputs

        # Feed-forward sub-block with pre-LayerNorm and residual add.
        ff_in = nn.LayerNorm(dtype=cfg.dtype)(residual)
        ff_out = MlpBlock(config=cfg)(ff_in, deterministic=deterministic)
        return residual + ff_out
class Transformer(nn.Module):
    """Transformer Model for sequence tagging."""
    config: TransformerConfig

    @nn.compact
    def __call__(self, *, inputs, train):
        """Applies Transformer model on the inputs.

        Args:
          inputs: input token-id data of shape `(batch, len)`.
          train: if it is training (enables dropout).

        Returns:
          logits of shape `(batch, len, output_vocab_size)`.
        """
        assert inputs.ndim == 2  # (batch, len)
        cfg = self.config

        # NOTE(review): the original computed a `padding_mask` here that was
        # never used anywhere in the model; removed as dead code.
        x = inputs.astype('int32')
        x = nn.Embed(
            num_embeddings=cfg.vocab_size, features=cfg.emb_dim, name='embed')(x)
        x = nn.Dropout(rate=cfg.dropout_rate)(x, deterministic=not train)
        x = AddPositionEmbs(cfg)(x)

        for _ in range(cfg.num_layers):
            x = Encoder1DBlock(cfg)(x, deterministic=not train)

        x = nn.LayerNorm(dtype=cfg.dtype)(x)
        logits = nn.Dense(
            cfg.output_vocab_size,
            kernel_init=cfg.kernel_init,
            bias_init=cfg.bias_init)(x)
        return logits
| |
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tasks related to Grading Survey Groups and Records."""
import datetime
from google.appengine.api import taskqueue
from google.appengine.ext import db
from google.appengine.ext import ndb
from django import http
from django.conf.urls import url
from soc.logic import mail_dispatcher
from soc.logic import site
from soc.tasks.helper import error_handler
from soc.modules.gsoc.logic import grading_record
from soc.modules.gsoc.logic import profile as profile_logic
from soc.modules.gsoc.models.grading_record import GSoCGradingRecord
from soc.modules.gsoc.models.grading_survey_group import GSoCGradingSurveyGroup
from soc.modules.gsoc.models.project import GSoCProject
class GradingRecordTasks(object):
    """Tasks that are involved in dealing with GradingRecords."""

    # batch size to use when going through GSoCProject entities
    DEF_BATCH_SIZE = 25

    def djangoURLPatterns(self):
        """Returns the URL patterns for the tasks in this module."""
        patterns = [
            url(r'tasks/gsoc/grading_record/update_records$',
                self.updateRecordsForSurveyGroup),
            url(r'tasks/gsoc/grading_record/update_projects$',
                self.updateProjectsForSurveyGroup),
            # FIX: anchored with '$' for consistency with the sibling
            # patterns above (the original pattern was unanchored).
            url(r'tasks/gsoc/grading_record/mail_result$',
                self.sendMailAboutGradingRecordResult)]
        return patterns

    def updateRecordsForSurveyGroup(self, request, *args, **kwargs):
        """Updates or creates GradingRecords for the given GradingSurveyGroup.

        Expects the following to be present in the POST dict:
          group_key: Specifies the GradingSurveyGroup key name.
          cursor: optional to specify where the query should continue from.

        Args:
          request: Django Request object
        """
        post_dict = request.POST

        group_key = post_dict.get('group_key')
        if not group_key:
            # invalid task data, log and return OK
            return error_handler.logErrorAndReturnOK(
                'Invalid updateRecordForSurveyGroup data: %s' % post_dict)

        # get the GradingSurveyGroup for the given key
        survey_group = GSoCGradingSurveyGroup.get_by_id(int(group_key))

        if not survey_group:
            # invalid GradingSurveyGroup specified, log and return OK
            return error_handler.logErrorAndReturnOK(
                'Invalid GradingSurveyGroup specified: %s' % group_key)

        q = GSoCProject.all()
        q.filter('program', survey_group.program)
        q.filter('status', 'accepted')

        if 'cursor' in post_dict:
            q.with_cursor(post_dict['cursor'])

        # get the first batch_size number of projects
        projects = q.fetch(self.DEF_BATCH_SIZE)

        if not projects:
            # task completed, update timestamp for last update complete
            survey_group.last_update_complete = datetime.datetime.now()
            survey_group.put()
            return http.HttpResponse()

        # update/create and batch put the new GradingRecords
        grading_record.updateOrCreateRecordsFor(survey_group, projects)

        # Re-enqueue this task at the next cursor position so the whole
        # project set is processed in DEF_BATCH_SIZE chunks.
        task_params = {'group_key': group_key,
                       'cursor': q.cursor()}
        new_task = taskqueue.Task(params=task_params, url=request.path)
        new_task.add()

        # task completed, return OK
        return http.HttpResponse('OK')

    def updateProjectsForSurveyGroup(self, request, *args, **kwargs):
        """Updates each GSoCProject for which a GradingRecord is found.

        Expects the following to be present in the POST dict:
          group_key: Specifies the GradingSurveyGroup key name.
          cursor: Optional, specifies the cursor for the GradingRecord query.
          send_mail: Optional, if this string evaluates to True mail will be
            sent for each GradingRecord that's processed.

        Args:
          request: Django Request object
        """
        post_dict = request.POST

        group_key = post_dict.get('group_key')
        if not group_key:
            # invalid task data, log and return OK
            return error_handler.logErrorAndReturnOK(
                'Invalid updateRecordForSurveyGroup data: %s' % post_dict)

        # get the GradingSurveyGroup for the given keyname
        survey_group = GSoCGradingSurveyGroup.get_by_id(int(group_key))

        if not survey_group:
            # invalid GradingSurveyGroup specified, log and return OK
            return error_handler.logErrorAndReturnOK(
                'Invalid GradingSurveyGroup specified: %s' % group_key)

        q = GSoCGradingRecord.all()
        q.filter('grading_survey_group', survey_group)

        if 'cursor' in post_dict:
            q.with_cursor(post_dict['cursor'])

        # get the first batch_size number of GradingRecords
        records = q.fetch(self.DEF_BATCH_SIZE)

        if not records:
            # we are done
            return http.HttpResponse()

        grading_record.updateProjectsForGradingRecords(records)

        # check if we need to send an email for each GradingRecord
        send_mail = post_dict.get('send_mail', '')

        if send_mail:
            # enqueue a task to send mail for each GradingRecord
            for record in records:
                # pass along these params as POST to the new task
                task_params = {'record_key': str(record.key())}
                task_url = '/tasks/gsoc/grading_record/mail_result'

                mail_task = taskqueue.Task(params=task_params, url=task_url)
                mail_task.add('mail')

        # Re-enqueue this task at the next cursor position.
        task_params = {'group_key': group_key,
                       'cursor': q.cursor(),
                       'send_mail': send_mail}
        new_task = taskqueue.Task(params=task_params, url=request.path)
        new_task.add()

        # task completed, return OK
        return http.HttpResponse('OK')

    def sendMailAboutGradingRecordResult(self, request, *args, **kwargs):
        """Sends out a mail about the result of one GradingRecord.

        Expects the following to be present in the POST dict:
          record_key: Specifies the key for the record to process.

        Args:
          request: Django Request object
        """
        post_dict = request.POST

        record_key = post_dict.get('record_key')
        if not record_key:
            # no GradingRecord key specified, log and return OK
            # BUG FIX: the original did not `return` here (unlike the
            # sibling handlers) and fell through to crash below.
            return error_handler.logErrorAndReturnOK(
                'No valid record_key specified in POST data: %s' % request.POST)

        record = GSoCGradingRecord.get(db.Key(record_key))
        if not record:
            # no valid GradingRecord key specified, log and return OK
            # BUG FIX: `return` added here as well for the same reason.
            return error_handler.logErrorAndReturnOK(
                'No valid GradingRecord key specified: %s' % record_key)

        survey_group_entity = record.grading_survey_group
        project_entity = record.parent()
        # The project's parent is the student profile (old db key -> ndb).
        student_entity = ndb.Key.from_old_key(project_entity.parent_key()).get()
        org_key = GSoCProject.org.get_value_for_datastore(project_entity)
        org = ndb.Key.from_old_key(org_key).get()
        site_entity = site.singleton()

        mail_context = {
            'survey_group': survey_group_entity,
            'grading_record': record,
            'project': project_entity,
            'organization': org,
            'site_name': site_entity.site_name,
            'to_name': student_entity.public_name
        }

        # set the sender
        (_, sender_address) = mail_dispatcher.getDefaultMailSender()
        mail_context['sender'] = sender_address
        # set the receiver and subject
        mail_context['to'] = student_entity.contact.email
        mail_context['cc'] = []
        mail_context['subject'] = '%s results processed for %s' % (
            survey_group_entity.name, project_entity.title)

        org_admins = profile_logic.getOrgAdmins(org.key)

        # collect all mentors of the project
        mentors = ndb.get_multi(
            map(ndb.Key.from_old_key,
                GSoCProject.mentors.get_value_for_datastore(project_entity)))

        # add org admins and mentors to the cc list
        for org_member in org_admins + mentors:
            mail_context['cc'].append(org_member.contact.email)

        # send out the email using a template
        mail_template = 'modules/gsoc/grading_record/mail/result.html'
        mail_dispatcher.sendMailFromTemplate(mail_template, mail_context)

        # return OK
        return http.HttpResponse()
| |
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the learner dashboard and the notifications dashboard."""
from core.domain import exp_services
from core.domain import learner_progress_services
from core.domain import subscription_services
from core.tests import test_utils
import feconf
class LearnerDashboardHandlerTest(test_utils.GenericTestBase):
    """Tests for the learner dashboard data handler."""

    # A second registered user who creates and publishes the content that
    # the viewer's learner progress is recorded against.
    OWNER_EMAIL = 'owner@example.com'
    OWNER_USERNAME = 'owner'

    # Exploration ids and titles used as fixtures across the tests.
    EXP_ID_1 = 'EXP_ID_1'
    EXP_TITLE_1 = 'Exploration title 1'
    EXP_ID_2 = 'EXP_ID_2'
    EXP_TITLE_2 = 'Exploration title 2'
    EXP_ID_3 = 'EXP_ID_3'
    EXP_TITLE_3 = 'Exploration title 3'

    # Collection ids and titles used as fixtures across the tests.
    COL_ID_1 = 'COL_ID_1'
    COL_TITLE_1 = 'Collection title 1'
    COL_ID_2 = 'COL_ID_2'
    COL_TITLE_2 = 'Collection title 2'
    COL_ID_3 = 'COL_ID_3'
    COL_TITLE_3 = 'Collection title 3'
    def setUp(self):
        """Registers an owner (who publishes content) and a viewer (whose
        learner progress the tests exercise), and caches their user ids."""
        super(LearnerDashboardHandlerTest, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)

        self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
def test_can_see_completed_explorations(self):
self.login(self.VIEWER_EMAIL)
response = self.get_json(feconf.LEARNER_DASHBOARD_DATA_URL)
self.assertEqual(len(response['completed_explorations_list']), 0)
self.save_new_default_exploration(
self.EXP_ID_1, self.owner_id, title=self.EXP_TITLE_1)
self.publish_exploration(self.owner_id, self.EXP_ID_1)
learner_progress_services.mark_exploration_as_completed(
self.viewer_id, self.EXP_ID_1)
response = self.get_json(feconf.LEARNER_DASHBOARD_DATA_URL)
self.assertEqual(len(response['completed_explorations_list']), 1)
self.assertEqual(
response['completed_explorations_list'][0]['id'], self.EXP_ID_1)
self.logout()
def test_can_see_completed_collections(self):
self.login(self.VIEWER_EMAIL)
response = self.get_json(feconf.LEARNER_DASHBOARD_DATA_URL)
self.assertEqual(len(response['completed_collections_list']), 0)
self.save_new_default_collection(
self.COL_ID_1, self.owner_id, title=self.COL_TITLE_1)
self.publish_collection(self.owner_id, self.COL_ID_1)
learner_progress_services.mark_collection_as_completed(
self.viewer_id, self.COL_ID_1)
response = self.get_json(feconf.LEARNER_DASHBOARD_DATA_URL)
self.assertEqual(len(response['completed_collections_list']), 1)
self.assertEqual(
response['completed_collections_list'][0]['id'], self.COL_ID_1)
self.logout()
def test_can_see_incomplete_explorations(self):
self.login(self.VIEWER_EMAIL)
response = self.get_json(feconf.LEARNER_DASHBOARD_DATA_URL)
self.assertEqual(len(response['incomplete_explorations_list']), 0)
self.save_new_default_exploration(
self.EXP_ID_1, self.owner_id, title=self.EXP_TITLE_1)
self.publish_exploration(self.owner_id, self.EXP_ID_1)
state_name = 'state_name'
version = 1
learner_progress_services.mark_exploration_as_incomplete(
self.viewer_id, self.EXP_ID_1, state_name, version)
response = self.get_json(feconf.LEARNER_DASHBOARD_DATA_URL)
self.assertEqual(len(response['incomplete_explorations_list']), 1)
self.assertEqual(
response['incomplete_explorations_list'][0]['id'], self.EXP_ID_1)
self.logout()
def test_can_see_incomplete_collections(self):
self.login(self.VIEWER_EMAIL)
response = self.get_json(feconf.LEARNER_DASHBOARD_DATA_URL)
self.assertEqual(len(response['incomplete_collections_list']), 0)
self.save_new_default_collection(
self.COL_ID_1, self.owner_id, title=self.COL_TITLE_1)
self.publish_collection(self.owner_id, self.COL_ID_1)
learner_progress_services.mark_collection_as_incomplete(
self.viewer_id, self.COL_ID_1)
response = self.get_json(feconf.LEARNER_DASHBOARD_DATA_URL)
self.assertEqual(len(response['incomplete_collections_list']), 1)
self.assertEqual(
response['incomplete_collections_list'][0]['id'], self.COL_ID_1)
self.logout()
def test_can_see_exploration_playlist(self):
self.login(self.VIEWER_EMAIL)
response = self.get_json(feconf.LEARNER_DASHBOARD_DATA_URL)
self.assertEqual(len(response['exploration_playlist']), 0)
self.save_new_default_exploration(
self.EXP_ID_1, self.owner_id, title=self.EXP_TITLE_1)
self.publish_exploration(self.owner_id, self.EXP_ID_1)
learner_progress_services.add_exp_to_learner_playlist(
self.viewer_id, self.EXP_ID_1)
response = self.get_json(feconf.LEARNER_DASHBOARD_DATA_URL)
self.assertEqual(len(response['exploration_playlist']), 1)
self.assertEqual(
response['exploration_playlist'][0]['id'], self.EXP_ID_1)
self.logout()
def test_can_see_collection_playlist(self):
self.login(self.VIEWER_EMAIL)
response = self.get_json(feconf.LEARNER_DASHBOARD_DATA_URL)
self.assertEqual(len(response['collection_playlist']), 0)
self.save_new_default_collection(
self.COL_ID_1, self.owner_id, title=self.COL_TITLE_1)
self.publish_collection(self.owner_id, self.COL_ID_1)
learner_progress_services.add_collection_to_learner_playlist(
self.viewer_id, self.COL_ID_1)
response = self.get_json(feconf.LEARNER_DASHBOARD_DATA_URL)
self.assertEqual(len(response['collection_playlist']), 1)
self.assertEqual(
response['collection_playlist'][0]['id'], self.COL_ID_1)
self.logout()
def test_can_see_subscription(self):
self.login(self.VIEWER_EMAIL)
response = self.get_json(feconf.LEARNER_DASHBOARD_DATA_URL)
self.assertEqual(len(response['subscription_list']), 0)
subscription_services.subscribe_to_creator(
self.viewer_id, self.owner_id)
response = self.get_json(feconf.LEARNER_DASHBOARD_DATA_URL)
self.assertEqual(len(response['subscription_list']), 1)
self.assertEqual(
response['subscription_list'][0]['creator_username'],
self.OWNER_USERNAME)
self.logout()
def test_get_learner_dashboard_ids(self):
self.login(self.VIEWER_EMAIL)
self.save_new_default_exploration(
self.EXP_ID_1, self.owner_id, title=self.EXP_TITLE_1)
self.publish_exploration(self.owner_id, self.EXP_ID_1)
self.save_new_default_exploration(
self.EXP_ID_2, self.owner_id, title=self.EXP_TITLE_2)
self.publish_exploration(self.owner_id, self.EXP_ID_2)
self.save_new_default_exploration(
self.EXP_ID_3, self.owner_id, title=self.EXP_TITLE_3)
self.publish_exploration(self.owner_id, self.EXP_ID_3)
self.save_new_default_collection(
self.COL_ID_1, self.owner_id, title=self.COL_TITLE_1)
self.publish_collection(self.owner_id, self.COL_ID_1)
self.save_new_default_collection(
self.COL_ID_2, self.owner_id, title=self.COL_TITLE_2)
self.publish_collection(self.owner_id, self.COL_ID_2)
self.save_new_default_collection(
self.COL_ID_3, self.owner_id, title=self.COL_TITLE_3)
self.publish_collection(self.owner_id, self.COL_ID_3)
state_name = 'state_name'
version = 1
learner_progress_services.mark_exploration_as_completed(
self.viewer_id, self.EXP_ID_1)
learner_progress_services.mark_exploration_as_incomplete(
self.viewer_id, self.EXP_ID_2, state_name, version)
learner_progress_services.add_exp_to_learner_playlist(
self.viewer_id, self.EXP_ID_3)
learner_progress_services.mark_collection_as_completed(
self.viewer_id, self.COL_ID_1)
learner_progress_services.mark_collection_as_incomplete(
self.viewer_id, self.COL_ID_2)
learner_progress_services.add_collection_to_learner_playlist(
self.viewer_id, self.COL_ID_3)
response = self.get_json(feconf.LEARNER_DASHBOARD_IDS_DATA_URL)
learner_dashboard_activity_ids = (
response['learner_dashboard_activity_ids'])
self.assertEqual(
learner_dashboard_activity_ids['completed_exploration_ids'],
[self.EXP_ID_1])
self.assertEqual(
learner_dashboard_activity_ids['incomplete_exploration_ids'],
[self.EXP_ID_2])
self.assertEqual(
learner_dashboard_activity_ids['exploration_playlist_ids'],
[self.EXP_ID_3])
self.assertEqual(
learner_dashboard_activity_ids['completed_collection_ids'],
[self.COL_ID_1])
self.assertEqual(
learner_dashboard_activity_ids['incomplete_collection_ids'],
[self.COL_ID_2])
self.assertEqual(
learner_dashboard_activity_ids['collection_playlist_ids'],
[self.COL_ID_3])
class LearnerDashboardFeedbackThreadHandlerTest(test_utils.GenericTestBase):
    """Tests for the handler that returns the message summaries of a
    feedback thread shown on the learner dashboard."""

    # Demo exploration id loaded in setUp().
    EXP_ID_1 = '0'

    def setUp(self):
        """Load demo exploration 0 and create one feedback thread on it
        containing a single message."""
        super(LearnerDashboardFeedbackThreadHandlerTest, self).setUp()

        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)

        # Load exploration 0.
        exp_services.load_demo(self.EXP_ID_1)

        # Get the CSRF token and create a single thread with a single message.
        self.login(self.EDITOR_EMAIL)
        response = self.testapp.get('/create/%s' % self.EXP_ID_1)
        self.csrf_token = self.get_csrf_token_from_response(response)
        self.post_json('%s/%s' % (
            feconf.FEEDBACK_THREADLIST_URL_PREFIX, self.EXP_ID_1
        ), {
            'state_name': self._get_unicode_test_string('statename'),
            'subject': self._get_unicode_test_string('subject'),
            'text': 'a sample message',
        }, self.csrf_token)
        self.logout()

    def test_get_message_summaries(self):
        """The handler returns one summary per message, in posting
        order, with the message text and author username."""
        self.login(self.EDITOR_EMAIL)
        # Fetch all the feedback threads of that exploration.
        response_dict = self.get_json(
            '%s/%s' % (feconf.FEEDBACK_THREADLIST_URL_PREFIX, self.EXP_ID_1))

        # Get the id of the thread.
        thread_id = response_dict['threads'][0]['thread_id']

        # Get the message summary of the thread.
        thread_url = '%s/%s/%s' % (
            feconf.LEARNER_DASHBOARD_FEEDBACK_THREAD_DATA_URL, self.EXP_ID_1,
            thread_id)
        response_dict = self.get_json(thread_url)
        messages_summary = response_dict['message_summary_list']
        first_message = messages_summary[0]

        self.assertDictContainsSubset({
            'text': 'a sample message',
            'author_username': 'editor'
        }, first_message)

        # Add another message.
        thread_url = '%s/%s/%s' % (
            feconf.FEEDBACK_THREAD_URL_PREFIX, self.EXP_ID_1, thread_id)
        self.post_json(thread_url, {
            'updated_status': None,
            'updated_subject': None,
            'text': 'Message 1'
        }, self.csrf_token)

        # Again fetch the thread message summary.
        thread_url = '%s/%s/%s' % (
            feconf.LEARNER_DASHBOARD_FEEDBACK_THREAD_DATA_URL, self.EXP_ID_1,
            thread_id)
        response_dict = self.get_json(thread_url)
        messages_summary = response_dict['message_summary_list']

        # Check the summary of the second message.
        second_message = messages_summary[1]
        self.assertDictContainsSubset({
            'text': 'Message 1',
            'author_username': 'editor'
        }, second_message)

        self.logout()
| |
'''
Mathematical operations that generalize many operations from the
standard math module so that they also work on numbers with
uncertainties.
Examples:
from umath import sin
# Manipulation of numbers with uncertainties:
x = uncertainties.ufloat((3, 0.1))
print sin(x) # prints 0.141120008...+/-0.098999...
# The umath functions also work on regular Python floats:
print sin(3) # prints 0.141120008... This is a Python float.
Importing all the functions from this module into the global namespace
is possible. This is encouraged when using a Python shell as a
calculator. Example:
import uncertainties
from uncertainties.umath import * # Imports tan(), etc.
x = uncertainties.ufloat((3, 0.1))
print tan(x) # tan() is the uncertainties.umath.tan function
The numbers with uncertainties handled by this module are objects from
the uncertainties module, from either the Variable or the
AffineScalarFunc class.
(c) 2009-2013 by Eric O. LEBIGOT (EOL) <eric.lebigot@normalesup.org>.
Please send feature requests, bug reports, or feedback to this address.
This software is released under a dual license. (1) The BSD license.
(2) Any other license, as long as it is obtained from the original
author.'''
from __future__ import division # Many analytical derivatives depend on this
# Standard modules
import math
import sys
import itertools
import functools
# Local modules
from __init__ import wrap, set_doc, __author__, to_affine_scalar, AffineScalarFunc
###############################################################################
# We wrap the functions from the math module so that they keep track of
# uncertainties by returning a AffineScalarFunc object.
# Some functions from the math module cannot be adapted in a standard
# way so to work with AffineScalarFunc objects (either as their result
# or as their arguments):
# (1) Some functions return a result of a type whose value and
# variations (uncertainties) cannot be represented by AffineScalarFunc
# (e.g., math.frexp, which returns a tuple). The exception raised
# when not wrapping them with wrap() is more obvious than the
# one obtained when wrapping them (in fact, the wrapped functions
# attempt operations that are not supported, such as calculating a
# subtraction on a result of type tuple).
# (2) Some functions don't take continuous scalar arguments (which can
# be varied during differentiation): math.fsum, math.factorial...
# Such functions can either be:
# - wrapped in a special way.
# - excluded from standard wrapping by adding their name to
# no_std_wrapping
# Math functions that have a standard interface: they take
# one or more float arguments, and return a scalar:
# Math functions that have a standard interface: they take
# one or more float arguments, and return a scalar.  Filled in below
# as the math module is wrapped:
many_scalars_to_scalar_funcs = []

# Some functions require a specific treatment and must therefore be
# excluded from standard wrapping; they are handled individually in
# the "special cases" section below:
# no_std_wrapping = ['modf', 'frexp', 'ldexp', 'fsum', 'factorial']

# Functions with numerical derivatives (no analytical derivative is
# supplied for them in fixed_derivatives):
num_deriv_funcs = ['fmod', 'gamma', 'isinf', 'isnan',
                   'lgamma', 'trunc']

# Functions that do not belong in many_scalars_to_scalar_funcs, but
# that have a version that handles uncertainties:
non_std_wrapped_funcs = []

# Function that copies the relevant attributes from generalized
# functions from the math module (only __doc__ and __name__, so that
# the wrapped function keeps the original's signature-free metadata):
wraps = functools.partial(functools.update_wrapper,
                          assigned=('__doc__', '__name__'))
########################################
# Wrapping of math functions:
# Fixed formulas for the derivatives of some functions from the math
# module (some functions might not be present in all version of
# Python). Singular points are not taken into account. The user
# should never give "large" uncertainties: problems could only appear
# if this assumption does not hold.
# Functions not mentioned in _fixed_derivatives have their derivatives
# calculated numerically.
# Functions that have singularities (possibly at infinity) benefit
# from analytical calculations (instead of the default numerical
# calculation) because their derivatives generally change very fast.
# Even slowly varying functions (e.g., abs()) yield more precise
# results when differentiated analytically, because of the loss of
# precision in numerical calculations.
#def log_1arg_der(x):
# """
# Derivative of log(x) (1-argument form).
# """
# return 1/x
def log_der0(*args):
    """
    Derivative of math.log() with respect to its first argument.

    Works whether 1 or 2 arguments (value, base) are given.
    """
    # d/dx log(x) = 1/x, while d/dx log(x, base) = 1/x/log(base).
    if len(args) != 1:
        return 1/args[0]/math.log(args[1])  # 2-argument form
    return 1/args[0]
# The following version goes about as fast:
## A 'try' is used for the most common case because it is fast when no
## exception is raised:
#try:
# return log_1arg_der(*args) # Argument number check
#except TypeError:
# return 1/args[0]/math.log(args[1]) # 2-argument form
# 2/sqrt(pi): constant factor in d/dx erf(x) = 2/sqrt(pi)*exp(-x**2):
_erf_coef = 2/math.sqrt(math.pi)  # Optimization for erf()

# Maps a math-module function name to the list of the analytical first
# derivatives of that function, one lambda per positional argument, in
# argument order:
fixed_derivatives = {
    # In alphabetical order, here:
    'acos': [lambda x: -1/math.sqrt(1-x**2)],
    'acosh': [lambda x: 1/math.sqrt(x**2-1)],
    'asin': [lambda x: 1/math.sqrt(1-x**2)],
    'asinh': [lambda x: 1/math.sqrt(1+x**2)],
    'atan': [lambda x: 1/(1+x**2)],
    'atan2': [lambda y, x: x/(x**2+y**2),  # Correct for x == 0
              lambda y, x: -y/(x**2+y**2)],  # Correct for x == 0
    'atanh': [lambda x: 1/(1-x**2)],
    # Step functions: derivative 0 everywhere it is defined:
    'ceil': [lambda x: 0],
    'copysign': [lambda x, y: (1 if x >= 0 else -1) * math.copysign(1, y),
                 lambda x, y: 0],
    'cos': [lambda x: -math.sin(x)],
    'cosh': [math.sinh],
    'degrees': [lambda x: math.degrees(1)],
    'erf': [lambda x: math.exp(-x**2)*_erf_coef],
    'erfc': [lambda x: -math.exp(-x**2)*_erf_coef],
    'exp': [math.exp],
    'expm1': [math.exp],
    'fabs': [lambda x: 1 if x >= 0 else -1],
    'floor': [lambda x: 0],
    'hypot': [lambda x, y: x/math.hypot(x, y),
              lambda x, y: y/math.hypot(x, y)],
    # log() takes 1 or 2 arguments; log_der0 handles both forms:
    'log': [log_der0,
            lambda x, y: -math.log(x, y)/y/math.log(y)],
    'log10': [lambda x: 1/x/math.log(10)],
    'log1p': [lambda x: 1/(1+x)],
    'pow': [lambda x, y: y*math.pow(x, y-1),
            lambda x, y: math.log(x) * math.pow(x, y)],
    'radians': [lambda x: math.radians(1)],
    'sin': [math.cos],
    'sinh': [math.cosh],
    'sqrt': [lambda x: 0.5/math.sqrt(x)],
    'tan': [lambda x: 1+math.tan(x)**2],
    'tanh': [lambda x: 1-math.tanh(x)**2]
}
# Many built-in functions in the math module are wrapped with a
# version which is uncertainty aware:

this_module = sys.modules[__name__]

# for (name, attr) in vars(math).items():
for name in dir(math):

    if name in fixed_derivatives:  # Priority to functions in fixed_derivatives
        derivatives = fixed_derivatives[name]
    elif name in num_deriv_funcs:
        # Functions whose derivatives are calculated numerically by
        # this module fall here (isinf, fmod,...):
        derivatives = None  # Means: numerical calculation required
    else:
        continue  # 'name' not wrapped by this module (__doc__, e, etc.)

    func = getattr(math, name)

    # The wrapped function is installed in this module under the same
    # name as the math original; wraps() copies __doc__ and __name__:
    setattr(this_module, name,
            wraps(wrap(func, derivatives), func))

    many_scalars_to_scalar_funcs.append(name)
###############################################################################
########################################
# Special cases: some of the functions from no_std_wrapping:
##########
# The math.factorial function is not converted to an uncertainty-aware
# function, because it does not handle non-integer arguments: it does
# not make sense to give it an argument with a numerical error
# (whereas this would be relevant for the gamma function).
##########
# fsum takes a single argument, which cannot be differentiated.
# However, each of the arguments inside this single list can
# be a variable. We handle this in a specific way:
# math.factorial and math.fsum only exist from Python 2.6 on:
if sys.version_info[:2] >= (2, 6):

    # For drop-in compatibility with the math module:
    factorial = math.factorial
    non_std_wrapped_funcs.append('factorial')

    # We wrap math.fsum
    original_func = math.fsum  # For optimization purposes

    # The function below exists so that temporary variables do not
    # pollute the module namespace:
    def wrapped_fsum():
        """
        Returns an uncertainty-aware version of math.fsum, which must
        be contained in original_func.
        """

        # The fsum function is flattened, in order to use the
        # wrap() wrapper:
        flat_fsum = lambda *args: original_func(args)

        # The derivative of a sum with respect to each term is 1:
        flat_fsum_wrap = wrap(
            flat_fsum, itertools.repeat(lambda *args: 1))

        return wraps(lambda arg_list: flat_fsum_wrap(*arg_list),
                     original_func)

    fsum = wrapped_fsum()
    non_std_wrapped_funcs.append('fsum')
@set_doc(math.modf.__doc__)
def modf(x):
    """
    Version of modf that works for numbers with uncertainty, and also
    for regular numbers.

    Returns a (fractional_part, integer_part) tuple like math.modf():
    the fractional part carries the uncertainty of x, while the
    integer part is a plain float.
    """

    # The code below is inspired by wrap().  It is
    # simpler because only 1 argument is given, and there is no
    # delegation to other functions involved (as for __mul__, etc.).

    aff_func = to_affine_scalar(x)

    (frac_part, int_part) = math.modf(aff_func.nominal_value)

    if aff_func.derivatives:
        # The derivative of the fractional part is simply 1: the
        # derivatives of modf(x)[0] are the derivatives of x:
        return (AffineScalarFunc(frac_part, aff_func.derivatives), int_part)
    else:
        # This function was not called with an AffineScalarFunc
        # argument: there is no need to return numbers with
        # uncertainties:
        return (frac_part, int_part)

# modf() returns a tuple, not a single scalar, so it does not have the
# standard interface of the many_scalars_to_scalar_funcs functions; it
# is exported through the non-standard list, like fsum and frexp
# (__all__ is unchanged, since both lists are concatenated into it):
non_std_wrapped_funcs.append('modf')
@set_doc(math.ldexp.__doc__)
def ldexp(x, y):
    """
    Version of math.ldexp() (i.e. x*(2**y)) that works for an x with
    uncertainty.  y must be an integer, as for math.ldexp().
    """
    # The code below is inspired by wrap().  It is
    # simpler because only 1 argument is given, and there is no
    # delegation to other functions involved (as for __mul__, etc.).

    # Another approach would be to add an additional argument to
    # wrap() so that some arguments are automatically
    # considered as constants.

    aff_func = to_affine_scalar(x)  # y must be an integer, for math.ldexp

    if aff_func.derivatives:
        # d/dx (x*2**y) = 2**y: every derivative of x is scaled by 2**y.
        factor = 2**y
        return AffineScalarFunc(
            math.ldexp(aff_func.nominal_value, y),
            # Chain rule (.items() instead of the Python-2-only
            # .iteritems(), so that this also runs under Python 3):
            dict((var, factor*deriv)
                 for (var, deriv) in aff_func.derivatives.items()))
    else:
        # This function was not called with an AffineScalarFunc
        # argument: there is no need to return numbers with
        # uncertainties.

        # aff_func.nominal_value is not passed instead of x, because
        # we do not have to care about the type of the return value of
        # math.ldexp, this way (aff_func.nominal_value might be the
        # value of x coerced to a different type [int->float, for
        # instance]):
        return math.ldexp(x, y)

# ldexp()'s second argument is an integer, which cannot be varied
# during differentiation, so ldexp does not have the standard
# interface of the many_scalars_to_scalar_funcs functions (__all__ is
# unchanged, since both lists are concatenated into it):
non_std_wrapped_funcs.append('ldexp')
@set_doc(math.frexp.__doc__)
def frexp(x):
    """
    Version of frexp that works for numbers with uncertainty, and also
    for regular numbers.

    Returns an (m, e) pair like math.frexp(): the mantissa m carries
    the (scaled) uncertainty of x, while the exponent e is an integer.
    """

    # The code below is inspired by wrap().  It is
    # simpler because only 1 argument is given, and there is no
    # delegation to other functions involved (as for __mul__, etc.).

    aff_func = to_affine_scalar(x)

    if aff_func.derivatives:
        result = math.frexp(aff_func.nominal_value)
        # With frexp(x) = (m, e), dm/dx = 1/(2**e):
        factor = 1/(2**result[1])
        return (
            AffineScalarFunc(
                result[0],
                # Chain rule (.items() instead of the Python-2-only
                # .iteritems(), so that this also runs under Python 3):
                dict((var, factor*deriv)
                     for (var, deriv) in aff_func.derivatives.items())),
            # The exponent is an integer and is supposed to be
            # continuous (small errors):
            result[1])
    else:
        # This function was not called with an AffineScalarFunc
        # argument: there is no need to return numbers with
        # uncertainties:
        return math.frexp(x)

non_std_wrapped_funcs.append('frexp')
###############################################################################

# Exported functions: the uncertainty-aware versions of the math
# functions, whether wrapped in the standard way or specially handled:
__all__ = many_scalars_to_scalar_funcs + non_std_wrapped_funcs
| |
from sympy import (Symbol, Set, Union, Interval, oo, S, sympify, nan,
GreaterThan, LessThan, Max, Min, And, Or, Eq, Ge, Le, Gt, Lt, Float,
FiniteSet, Intersection, imageset, I, true, false, ProductSet, E,
sqrt, Complement, EmptySet, sin, cos, Lambda, ImageSet, pi,
Eq, Pow, Contains, Sum, RootOf, SymmetricDifference, Piecewise,
Matrix)
from mpmath import mpi
from sympy.core.compatibility import range
from sympy.utilities.pytest import raises
from sympy.utilities.pytest import raises, XFAIL
from sympy.abc import x, y, z, m, n
def test_interval_arguments():
    """Interval() argument handling: infinite/inverted/degenerate end
    points, symbolic end points, and rejection of invalid arguments."""
    # Infinite ends are automatically open:
    assert Interval(0, oo) == Interval(0, oo, False, True)
    assert Interval(0, oo).right_open is true
    assert Interval(-oo, 0) == Interval(-oo, 0, True, False)
    assert Interval(-oo, 0).left_open is true
    assert Interval(oo, -oo) == S.EmptySet

    # Degenerate closed intervals collapse to singletons:
    assert isinstance(Interval(1, 1), FiniteSet)
    e = Sum(x, (x, 1, 3))
    assert isinstance(Interval(e, e), FiniteSet)

    assert Interval(1, 0) == S.EmptySet
    assert Interval(1, 1).measure == 0

    # A degenerate interval with any open end is empty:
    assert Interval(1, 1, False, True) == S.EmptySet
    assert Interval(1, 1, True, False) == S.EmptySet
    assert Interval(1, 1, True, True) == S.EmptySet

    assert isinstance(Interval(0, Symbol('a')), Interval)
    assert Interval(Symbol('a', real=True, positive=True), 0) == S.EmptySet
    raises(ValueError, lambda: Interval(0, S.ImaginaryUnit))
    raises(ValueError, lambda: Interval(0, Symbol('z', real=False)))

    # Open/closed flags may not be arbitrary boolean expressions:
    raises(NotImplementedError, lambda: Interval(0, 1, And(x, y)))
    raises(NotImplementedError, lambda: Interval(0, 1, False, And(x, y)))
    raises(NotImplementedError, lambda: Interval(0, 1, z, And(x, y)))
def test_interval_symbolic_end_points():
    """Sup/inf of unions with a symbolic real end point stay symbolic
    (Max/Min), and containment yields a relational."""
    end = Symbol('a', real=True)

    assert Union(Interval(0, end), Interval(0, 3)).sup == Max(end, 3)
    assert Union(Interval(end, 0), Interval(-3, 0)).inf == Min(-3, end)
    assert Interval(0, end).contains(1) == LessThan(1, end)
def test_union():
    """Union of intervals (all open/closed combinations), finite sets,
    and interval/finite-set mixtures."""
    assert Union(Interval(1, 2), Interval(2, 3)) == Interval(1, 3)
    assert Union(Interval(1, 2), Interval(2, 3, True)) == Interval(1, 3)
    assert Union(Interval(1, 3), Interval(2, 4)) == Interval(1, 4)
    assert Union(Interval(1, 2), Interval(1, 3)) == Interval(1, 3)
    assert Union(Interval(1, 3), Interval(1, 2)) == Interval(1, 3)
    assert Union(Interval(1, 3, False, True), Interval(1, 2)) == \
        Interval(1, 3, False, True)
    assert Union(Interval(1, 3), Interval(1, 2, False, True)) == Interval(1, 3)
    assert Union(Interval(1, 2, True), Interval(1, 3)) == Interval(1, 3)
    assert Union(Interval(1, 2, True), Interval(1, 3, True)) == \
        Interval(1, 3, True)
    assert Union(Interval(1, 2, True), Interval(1, 3, True, True)) == \
        Interval(1, 3, True, True)
    assert Union(Interval(1, 2, True, True), Interval(1, 3, True)) == \
        Interval(1, 3, True)
    assert Union(Interval(1, 3), Interval(2, 3)) == Interval(1, 3)
    assert Union(Interval(1, 3, False, True), Interval(2, 3)) == \
        Interval(1, 3)
    # Adjacent intervals that are open at the meeting point do not merge:
    assert Union(Interval(1, 2, False, True), Interval(2, 3, True)) != \
        Interval(1, 3)

    assert Union(Interval(1, 2), S.EmptySet) == Interval(1, 2)
    assert Union(S.EmptySet) == S.EmptySet

    assert Union(Interval(0, 1), [FiniteSet(1.0/n) for n in range(1, 10)]) == \
        Interval(0, 1)

    assert Interval(1, 2).union(Interval(2, 3)) == \
        Interval(1, 2) + Interval(2, 3)

    assert Interval(1, 2).union(Interval(2, 3)) == Interval(1, 3)

    assert Union(Set()) == Set()

    assert FiniteSet(1) + FiniteSet(2) + FiniteSet(3) == FiniteSet(1, 2, 3)
    assert FiniteSet('ham') + FiniteSet('eggs') == FiniteSet('ham', 'eggs')
    assert FiniteSet(1, 2, 3) + S.EmptySet == FiniteSet(1, 2, 3)

    assert FiniteSet(1, 2, 3) & FiniteSet(2, 3, 4) == FiniteSet(2, 3)
    assert FiniteSet(1, 2, 3) | FiniteSet(2, 3, 4) == FiniteSet(1, 2, 3, 4)

    x = Symbol("x")
    y = Symbol("y")
    z = Symbol("z")
    # A nested FiniteSet stays an element, it is not flattened:
    assert S.EmptySet | FiniteSet(x, FiniteSet(y, z)) == \
        FiniteSet(x, FiniteSet(y, z))

    # Test that Intervals and FiniteSets play nicely
    assert Interval(1, 3) + FiniteSet(2) == Interval(1, 3)
    assert Interval(1, 3, True, True) + FiniteSet(3) == \
        Interval(1, 3, True, False)
    X = Interval(1, 3) + FiniteSet(5)
    Y = Interval(1, 2) + FiniteSet(3)
    XandY = X.intersect(Y)
    assert 2 in X and 3 in X and 3 in XandY
    assert XandY.is_subset(X) and XandY.is_subset(Y)

    raises(TypeError, lambda: Union(1, 2, 3))

    assert X.is_iterable is False

    # issue 7843
    assert Union(S.EmptySet, FiniteSet(-sqrt(-I), sqrt(-I))) == \
        FiniteSet(-sqrt(-I), sqrt(-I))
def test_difference():
    """Set difference via the '-' operator on intervals, finite sets
    and unions."""
    assert Interval(1, 3) - Interval(1, 2) == Interval(2, 3, True)
    assert Interval(1, 3) - Interval(2, 3) == Interval(1, 2, False, True)
    assert Interval(1, 3, True) - Interval(2, 3) == Interval(1, 2, True, True)
    assert Interval(1, 3, True) - Interval(2, 3, True) == \
        Interval(1, 2, True, False)
    # Removing an interior point splits the interval into two open-ended
    # halves:
    assert Interval(0, 2) - FiniteSet(1) == \
        Union(Interval(0, 1, False, True), Interval(1, 2, True, False))

    assert FiniteSet(1, 2, 3) - FiniteSet(2) == FiniteSet(1, 3)
    assert FiniteSet('ham', 'eggs') - FiniteSet('eggs') == FiniteSet('ham')
    assert FiniteSet(1, 2, 3, 4) - Interval(2, 10, True, False) == \
        FiniteSet(1, 2)
    assert FiniteSet(1, 2, 3, 4) - S.EmptySet == FiniteSet(1, 2, 3, 4)
    assert Union(Interval(0, 2), FiniteSet(2, 3, 4)) - Interval(1, 3) == \
        Union(Interval(0, 1, False, True), FiniteSet(4))
    assert -1 in S.Reals - S.Naturals
def test_Complement():
    """Complement(A, B): relative complements of intervals, finite sets
    and unions, both evaluated and unevaluated."""
    assert Complement(Interval(1, 3), Interval(1, 2)) == Interval(2, 3, True)
    assert Complement(FiniteSet(1, 3, 4), FiniteSet(3, 4)) == FiniteSet(1)
    assert Complement(Union(Interval(0, 2),
                            FiniteSet(2, 3, 4)), Interval(1, 3)) == \
        Union(Interval(0, 1, False, True), FiniteSet(4))

    # Membership in unevaluated complements.  ('x not in s' replaces the
    # non-idiomatic 'not x in s'; the meaning is identical.)
    assert 3 not in Complement(Interval(0, 5), Interval(1, 4), evaluate=False)
    assert -1 in Complement(S.Reals, S.Naturals, evaluate=False)
    assert 1 not in Complement(S.Reals, S.Naturals, evaluate=False)

    assert Complement(S.Integers, S.UniversalSet) == EmptySet()
    assert S.UniversalSet.complement(S.Integers) == EmptySet()

    assert 0 not in S.Reals.intersect(S.Integers - FiniteSet(0))

    assert S.EmptySet - S.Integers == S.EmptySet

    assert (S.Integers - FiniteSet(0)) - FiniteSet(1) == \
        S.Integers - FiniteSet(0, 1)

    assert S.Reals - Union(S.Naturals, FiniteSet(pi)) == \
        Intersection(S.Reals - S.Naturals, S.Reals - FiniteSet(pi))
def test_complement():
    """The .complement(universe) method for intervals, finite sets and
    products, relative to an explicit universe set."""
    assert Interval(0, 1).complement(S.Reals) == \
        Union(Interval(-oo, 0, True, True), Interval(1, oo, True, True))
    assert Interval(0, 1, True, False).complement(S.Reals) == \
        Union(Interval(-oo, 0, True, False), Interval(1, oo, True, True))
    assert Interval(0, 1, False, True).complement(S.Reals) == \
        Union(Interval(-oo, 0, True, True), Interval(1, oo, False, True))
    assert Interval(0, 1, True, True).complement(S.Reals) == \
        Union(Interval(-oo, 0, True, False), Interval(1, oo, False, True))

    assert S.UniversalSet.complement(S.EmptySet) == S.EmptySet
    assert S.UniversalSet.complement(S.Reals) == S.EmptySet
    assert S.UniversalSet.complement(S.UniversalSet) == S.EmptySet

    assert S.EmptySet.complement(S.Reals) == S.Reals

    assert Union(Interval(0, 1), Interval(2, 3)).complement(S.Reals) == \
        Union(Interval(-oo, 0, True, True), Interval(1, 2, True, True),
              Interval(3, oo, True, True))

    assert FiniteSet(0).complement(S.Reals) == \
        Union(Interval(-oo, 0, True, True), Interval(0, oo, True, True))

    assert (FiniteSet(5) + Interval(S.NegativeInfinity,
                                    0)).complement(S.Reals) == \
        Interval(0, 5, True, True) + Interval(5, S.Infinity, True, True)

    assert FiniteSet(1, 2, 3).complement(S.Reals) == \
        Interval(S.NegativeInfinity, 1, True, True) + \
        Interval(1, 2, True, True) + Interval(2, 3, True, True) + \
        Interval(3, S.Infinity, True, True)

    # With a symbolic element, the complement stays unevaluated:
    assert FiniteSet(x).complement(S.Reals) == Complement(S.Reals, FiniteSet(x))

    assert FiniteSet(0, x).complement(S.Reals) == Complement(
        Interval(-oo, 0, True, True) + Interval(0, oo, True, True),
        FiniteSet(x), evaluate=False)

    # Complement of a product set relative to a larger product universe:
    square = Interval(0, 1) * Interval(0, 1)
    notsquare = square.complement(S.Reals*S.Reals)

    assert all(pt in square for pt in [(0, 0), (.5, .5), (1, 0), (1, 1)])
    assert not any(
        pt in notsquare for pt in [(0, 0), (.5, .5), (1, 0), (1, 1)])
    assert not any(pt in square for pt in [(-1, 0), (1.5, .5), (10, 10)])
    assert all(pt in notsquare for pt in [(-1, 0), (1.5, .5), (10, 10)])
def test_intersect():
    """The .intersect() (and alias .intersection()) method on
    intervals, finite sets and unions."""
    x = Symbol('x')
    assert Interval(0, 2).intersect(Interval(1, 2)) == Interval(1, 2)
    assert Interval(0, 2).intersect(Interval(1, 2, True)) == \
        Interval(1, 2, True)
    assert Interval(0, 2, True).intersect(Interval(1, 2)) == \
        Interval(1, 2, False, False)
    assert Interval(0, 2, True, True).intersect(Interval(1, 2)) == \
        Interval(1, 2, False, True)
    assert Interval(0, 2).intersect(Union(Interval(0, 1), Interval(2, 3))) == \
        Union(Interval(0, 1), Interval(2, 2))

    assert FiniteSet(1, 2, x).intersect(FiniteSet(x)) == FiniteSet(x)
    assert FiniteSet('ham', 'eggs').intersect(FiniteSet('ham')) == \
        FiniteSet('ham')

    assert FiniteSet(1, 2, 3, 4, 5).intersect(S.EmptySet) == S.EmptySet
    assert Interval(0, 5).intersect(FiniteSet(1, 3)) == FiniteSet(1, 3)
    assert Interval(0, 1, True, True).intersect(FiniteSet(1)) == S.EmptySet

    assert Union(Interval(0, 1), Interval(2, 3)).intersect(Interval(1, 2)) == \
        Union(Interval(1, 1), Interval(2, 2))
    assert Union(Interval(0, 1), Interval(2, 3)).intersect(Interval(0, 2)) == \
        Union(Interval(0, 1), Interval(2, 2))
    assert Union(Interval(0, 1), Interval(2, 3)).intersect(Interval(1, 2, True, True)) == \
        S.EmptySet
    assert Union(Interval(0, 1), Interval(2, 3)).intersect(S.EmptySet) == \
        S.EmptySet
    # A partially-undecidable intersection stays partly unevaluated:
    assert Union(Interval(0, 5), FiniteSet('ham')).intersect(FiniteSet(2, 3, 4, 5, 6)) == \
        Union(FiniteSet(2, 3, 4, 5), Intersection(FiniteSet(6), Union(Interval(0, 5), FiniteSet('ham'))))

    # issue 8217
    assert Intersection(FiniteSet(x), FiniteSet(y)) == \
        Intersection(FiniteSet(x), FiniteSet(y), evaluate=False)
    assert FiniteSet(x).intersect(S.Reals) == \
        Intersection(S.Reals, FiniteSet(x), evaluate=False)

    # tests for the intersection alias
    assert Interval(0, 5).intersection(FiniteSet(1, 3)) == FiniteSet(1, 3)
    assert Interval(0, 1, True, True).intersection(FiniteSet(1)) == S.EmptySet

    assert Union(Interval(0, 1), Interval(2, 3)).intersection(Interval(1, 2)) == \
        Union(Interval(1, 1), Interval(2, 2))
def test_intersection():
    """The Intersection class itself: iterability, symbolic membership,
    products and flattening of nested intersections."""
    # iterable
    i = Intersection(FiniteSet(1, 2, 3), Interval(2, 5), evaluate=False)
    assert i.is_iterable
    assert set(i) == set([S(2), S(3)])

    # challenging intervals
    x = Symbol('x', real=True)
    i = Intersection(Interval(0, 3), Interval(x, 6))
    assert (5 in i) is False
    # Membership of 2 cannot be decided without knowing x:
    raises(TypeError, lambda: 2 in i)

    # Singleton special cases
    assert Intersection(Interval(0, 1), S.EmptySet) == S.EmptySet
    assert Intersection(Interval(-oo, oo), Interval(-oo, x)) == Interval(-oo, x)

    # Products
    line = Interval(0, 5)
    i = Intersection(line**2, line**3, evaluate=False)
    assert (2, 2) not in i
    assert (2, 2, 2) not in i
    # A product-set intersection of mismatched dimensions is not iterable:
    raises(ValueError, lambda: list(i))

    # Nested unevaluated intersections are flattened:
    assert Intersection(Intersection(S.Integers, S.Naturals, evaluate=False),
                        S.Reals, evaluate=False) == \
        Intersection(S.Integers, S.Naturals, S.Reals, evaluate=False)
def test_issue_9623():
    """Intersection drops supersets that add no constraint, and empty
    interval overlaps collapse to EmptySet (issue 9623)."""
    sym = Symbol('n')

    reals = S.Reals
    nonneg = Interval(0, oo)
    singleton = FiniteSet(sym)

    # S.Reals is redundant next to Interval(0, oo):
    assert Intersection(reals, nonneg, singleton) == \
        Intersection(nonneg, singleton)

    assert Intersection(
        Interval(1, 2), Interval(3, 4), FiniteSet(sym)) == EmptySet()
def test_is_disjoint():
    """is_disjoint is False for overlapping intervals and True for
    separated ones."""
    overlapping = Interval(0, 2).is_disjoint(Interval(1, 2))
    separated = Interval(0, 2).is_disjoint(Interval(3, 4))

    assert overlapping == False
    assert separated == True
def test_ProductSet_of_single_arg_is_arg():
    """A ProductSet over a single set collapses to that set itself."""
    assert ProductSet(Interval(0, 1)) == Interval(0, 1)
def test_interval_subs():
    """Substituting a value for a symbolic bound re-evaluates the
    Interval (possibly to EmptySet when the bounds invert)."""
    bound = Symbol('a', real=True)

    assert Interval(0, bound).subs(bound, 2) == Interval(0, 2)
    assert Interval(bound, 0).subs(bound, 2) == S.EmptySet
def test_interval_to_mpi():
    """Conversion to an mpmath mpi interval; open/closed flags are
    discarded, as the second assert shows."""
    assert Interval(0, 1).to_mpi() == mpi(0, 1)
    assert Interval(0, 1, True, False).to_mpi() == mpi(0, 1)
    # Exact-type check on purpose (not isinstance):
    assert type(Interval(0, 1).to_mpi()) == type(mpi(0, 1))
def test_measure():
    """The .measure property (length/area) of intervals, unions,
    finite sets and product sets."""
    a = Symbol('a', real=True)

    assert Interval(1, 3).measure == 2
    assert Interval(0, a).measure == a
    assert Interval(1, a).measure == a - 1

    assert Union(Interval(1, 2), Interval(3, 4)).measure == 2
    # Finite sets contribute measure zero:
    assert Union(Interval(1, 2), Interval(3, 4), FiniteSet(5, 6, 7)).measure \
        == 2

    assert FiniteSet(1, 2, oo, a, -oo, -5).measure == 0

    assert S.EmptySet.measure == 0

    square = Interval(0, 10) * Interval(0, 10)
    offsetsquare = Interval(5, 15) * Interval(5, 15)
    band = Interval(-oo, oo) * Interval(2, 4)

    assert square.measure == offsetsquare.measure == 100
    assert (square + offsetsquare).measure == 175  # there is some overlap
    assert (square - offsetsquare).measure == 75
    assert (square * FiniteSet(1, 2, 3)).measure == 0
    assert (square.intersect(band)).measure == 20
    assert (square + band).measure == oo
    assert (band * FiniteSet(1, 2, 3)).measure == nan
def test_is_subset():
    """is_subset / issubset over intervals, finite sets and unions."""
    assert Interval(0, 1).is_subset(Interval(0, 2)) is True
    assert Interval(0, 3).is_subset(Interval(0, 2)) is False

    assert FiniteSet(1, 2).is_subset(FiniteSet(1, 2, 3, 4))
    assert FiniteSet(4, 5).is_subset(FiniteSet(1, 2, 3, 4)) is False
    assert FiniteSet(1).is_subset(Interval(0, 2))
    assert FiniteSet(1, 2).is_subset(Interval(0, 2, True, True)) is False
    assert (Interval(1, 2) + FiniteSet(3)).is_subset(
        (Interval(0, 2, False, True) + FiniteSet(2, 3)))

    assert Interval(3, 4).is_subset(Union(Interval(0, 1), Interval(2, 5))) is True
    assert Interval(3, 6).is_subset(Union(Interval(0, 1), Interval(2, 5))) is False

    assert FiniteSet(1, 2, 3, 4).is_subset(Interval(0, 5)) is True
    assert S.EmptySet.is_subset(FiniteSet(1, 2, 3)) is True

    assert Interval(0, 1).is_subset(S.EmptySet) is False
    assert S.EmptySet.is_subset(S.EmptySet) is True

    raises(ValueError, lambda: S.EmptySet.is_subset(1))

    # tests for the issubset alias
    assert FiniteSet(1, 2, 3, 4).issubset(Interval(0, 5)) is True
    assert S.EmptySet.issubset(FiniteSet(1, 2, 3)) is True


def test_is_proper_subset():
    """Proper subset excludes equality; non-Set argument raises."""
    assert Interval(0, 1).is_proper_subset(Interval(0, 2)) is True
    assert Interval(0, 3).is_proper_subset(Interval(0, 2)) is False
    assert S.EmptySet.is_proper_subset(FiniteSet(1, 2, 3)) is True

    raises(ValueError, lambda: Interval(0, 1).is_proper_subset(0))


def test_is_superset():
    """is_superset / issuperset mirror the subset relations."""
    assert Interval(0, 1).is_superset(Interval(0, 2)) == False
    assert Interval(0, 3).is_superset(Interval(0, 2))

    assert FiniteSet(1, 2).is_superset(FiniteSet(1, 2, 3, 4)) == False
    assert FiniteSet(4, 5).is_superset(FiniteSet(1, 2, 3, 4)) == False
    assert FiniteSet(1).is_superset(Interval(0, 2)) == False
    assert FiniteSet(1, 2).is_superset(Interval(0, 2, True, True)) == False
    assert (Interval(1, 2) + FiniteSet(3)).is_superset(
        (Interval(0, 2, False, True) + FiniteSet(2, 3))) == False

    assert Interval(3, 4).is_superset(Union(Interval(0, 1), Interval(2, 5))) == False

    assert FiniteSet(1, 2, 3, 4).is_superset(Interval(0, 5)) == False
    assert S.EmptySet.is_superset(FiniteSet(1, 2, 3)) == False

    assert Interval(0, 1).is_superset(S.EmptySet) == True
    assert S.EmptySet.is_superset(S.EmptySet) == True

    raises(ValueError, lambda: S.EmptySet.is_superset(1))

    # tests for the issuperset alias
    assert Interval(0, 1).issuperset(S.EmptySet) == True
    assert S.EmptySet.issuperset(S.EmptySet) == True


def test_is_proper_superset():
    """Proper superset excludes equality; non-Set argument raises."""
    assert Interval(0, 1).is_proper_superset(Interval(0, 2)) is False
    assert Interval(0, 3).is_proper_superset(Interval(0, 2)) is True
    assert FiniteSet(1, 2, 3).is_proper_superset(S.EmptySet) is True

    raises(ValueError, lambda: Interval(0, 1).is_proper_superset(0))
def test_contains():
    """Set.contains over intervals (all open/closed variants), FiniteSets and Unions."""
    assert Interval(0, 2).contains(1) is S.true
    assert Interval(0, 2).contains(3) is S.false
    assert Interval(0, 2, True, False).contains(0) is S.false
    assert Interval(0, 2, True, False).contains(2) is S.true
    assert Interval(0, 2, False, True).contains(0) is S.true
    assert Interval(0, 2, False, True).contains(2) is S.false
    assert Interval(0, 2, True, True).contains(0) is S.false
    assert Interval(0, 2, True, True).contains(2) is S.false

    assert FiniteSet(1, 2, 3).contains(2) is S.true
    assert FiniteSet(1, 2, Symbol('x')).contains(Symbol('x')) is S.true

    # issue 8197
    from sympy.abc import a, b
    assert isinstance(FiniteSet(b).contains(-a), Contains)
    assert isinstance(FiniteSet(b).contains(a), Contains)
    assert isinstance(FiniteSet(a).contains(1), Contains)
    raises(TypeError, lambda: 1 in FiniteSet(a))

    # issue 8209
    rad1 = Pow(Pow(2, S(1)/3) - 1, S(1)/3)
    rad2 = Pow(S(1)/9, S(1)/3) - Pow(S(2)/9, S(1)/3) + Pow(S(4)/9, S(1)/3)
    s1 = FiniteSet(rad1)
    s2 = FiniteSet(rad2)
    assert s1 - s2 == S.EmptySet

    items = [1, 2, S.Infinity, S('ham'), -1.1]
    fset = FiniteSet(*items)
    assert all(item in fset for item in items)
    assert all(fset.contains(item) is S.true for item in items)

    assert Union(Interval(0, 1), Interval(2, 5)).contains(3) is S.true
    assert Union(Interval(0, 1), Interval(2, 5)).contains(6) is S.false
    assert Union(Interval(0, 1), FiniteSet(2, 5)).contains(3) is S.false

    assert S.EmptySet.contains(1) is S.false
    assert FiniteSet(RootOf(x**3 + x - 1, 0)).contains(S.Infinity) is S.false

    assert RootOf(x**5 + x**3 + 1, 0) in S.Reals
    assert not RootOf(x**5 + x**3 + 1, 1) in S.Reals

    # non-bool results
    assert Union(Interval(1, 2), Interval(3, 4)).contains(x) == \
        Or(And(x <= 2, x >= 1), And(x <= 4, x >= 3))
    assert Intersection(Interval(1, x), Interval(2, 3)).contains(y) == \
        And(y <= 3, y <= x, y >= 1, y >= 2)


def test_interval_symbolic():
    """contains(x) on a symbol yields the defining inequalities; `in` raises."""
    x = Symbol('x')
    e = Interval(0, 1)
    assert e.contains(x) == And(0 <= x, x <= 1)
    raises(TypeError, lambda: x in e)
    e = Interval(0, 1, True, True)
    assert e.contains(x) == And(0 < x, x < 1)


def test_union_contains():
    """A Union's symbolic contains is an Or of the member conditions."""
    x = Symbol('x')
    i1 = Interval(0, 1)
    i2 = Interval(2, 3)
    i3 = Union(i1, i2)
    raises(TypeError, lambda: x in i3)
    e = i3.contains(x)
    assert e == Or(And(0 <= x, x <= 1), And(2 <= x, x <= 3))
    assert e.subs(x, -0.5) is false
    assert e.subs(x, 0.5) is true
    assert e.subs(x, 1.5) is false
    assert e.subs(x, 2.5) is true
    assert e.subs(x, 3.5) is false

    U = Interval(0, 2, True, True) + Interval(10, oo) + FiniteSet(-1, 2, 5, 6)
    assert all(el not in U for el in [0, 4, -oo])
    assert all(el in U for el in [2, 5, 10])
def test_is_number():
    """Sets are never numbers."""
    assert Interval(0, 1).is_number is False
    assert Set().is_number is False


def test_Interval_is_left_unbounded():
    """Left-unbounded iff the left end is -oo (Float -inf included)."""
    assert Interval(3, 4).is_left_unbounded is False
    assert Interval(-oo, 3).is_left_unbounded is True
    assert Interval(Float("-inf"), 3).is_left_unbounded is True


def test_Interval_is_right_unbounded():
    """Right-unbounded iff the right end is +oo (Float +inf included)."""
    assert Interval(3, 4).is_right_unbounded is False
    assert Interval(3, oo).is_right_unbounded is True
    assert Interval(3, Float("+inf")).is_right_unbounded is True
def test_Interval_as_relational():
    """as_relational encodes open/closed ends as strict/non-strict inequalities."""
    x = Symbol('x')

    assert Interval(-1, 2, False, False).as_relational(x) == \
        And(Le(-1, x), Le(x, 2))
    assert Interval(-1, 2, True, False).as_relational(x) == \
        And(Lt(-1, x), Le(x, 2))
    assert Interval(-1, 2, False, True).as_relational(x) == \
        And(Le(-1, x), Lt(x, 2))
    assert Interval(-1, 2, True, True).as_relational(x) == \
        And(Lt(-1, x), Lt(x, 2))

    # infinite ends are always strict
    assert Interval(-oo, 2, right_open=False).as_relational(x) == And(Lt(-oo, x), Le(x, 2))
    assert Interval(-oo, 2, right_open=True).as_relational(x) == And(Lt(-oo, x), Lt(x, 2))

    assert Interval(-2, oo, left_open=False).as_relational(x) == And(Le(-2, x), Lt(x, oo))
    assert Interval(-2, oo, left_open=True).as_relational(x) == And(Lt(-2, x), Lt(x, oo))

    assert Interval(-oo, oo).as_relational(x) == And(Lt(-oo, x), Lt(x, oo))

    x = Symbol('x', real=True)
    y = Symbol('y', real=True)
    assert Interval(x, y).as_relational(x) == (x <= y)
    assert Interval(y, x).as_relational(x) == (y <= x)


def test_Finite_as_relational():
    """A FiniteSet becomes an Or of equalities."""
    x = Symbol('x')
    y = Symbol('y')

    assert FiniteSet(1, 2).as_relational(x) == Or(Eq(x, 1), Eq(x, 2))
    assert FiniteSet(y, -5).as_relational(x) == Or(Eq(x, y), Eq(x, -5))


def test_Union_as_relational():
    """A Union becomes an Or of the member conditions (simplified)."""
    x = Symbol('x')
    assert (Interval(0, 1) + FiniteSet(2)).as_relational(x) == \
        Or(And(Le(0, x), Le(x, 1)), Eq(x, 2))
    assert (Interval(0, 1, True, True) + FiniteSet(1)).as_relational(x) == \
        And(Lt(0, x), Le(x, 1))


def test_Intersection_as_relational():
    """An unevaluated Intersection becomes an And of the member conditions."""
    x = Symbol('x')
    assert (Intersection(Interval(0, 1), FiniteSet(2),
            evaluate=False).as_relational(x)
            == And(And(Le(0, x), Le(x, 1)), Eq(x, 2)))
def test_EmptySet():
    """Basic EmptySet identities."""
    assert S.EmptySet.as_relational(Symbol('x')) is False
    assert S.EmptySet.intersect(S.UniversalSet) == S.EmptySet
    assert S.EmptySet.boundary == S.EmptySet


def test_finite_basic():
    """FiniteSet algebra, ordering operators and inf/sup."""
    x = Symbol('x')
    A = FiniteSet(1, 2, 3)
    B = FiniteSet(3, 4, 5)
    AorB = Union(A, B)
    AandB = A.intersect(B)
    assert A.is_subset(AorB) and B.is_subset(AorB)
    assert AandB.is_subset(A)
    assert AandB == FiniteSet(3)

    assert A.inf == 1 and A.sup == 3
    assert AorB.inf == 1 and AorB.sup == 5
    assert FiniteSet(x, 1, 5).sup == Max(x, 5)
    assert FiniteSet(x, 1, 5).inf == Min(x, 1)

    # issue 7335: FiniteSet(set) != set — containers are treated as elements
    assert FiniteSet(S.EmptySet) != S.EmptySet
    assert FiniteSet(FiniteSet(1, 2, 3)) != FiniteSet(1, 2, 3)
    assert FiniteSet((1, 2, 3)) != FiniteSet(1, 2, 3)

    # Ensure a variety of types can exist in a FiniteSet
    s = FiniteSet((1, 2), Float, A, -5, x, 'eggs', x**2, Interval)

    # comparison operators are subset/superset relations
    assert (A > B) is False
    assert (A >= B) is False
    assert (A < B) is False
    assert (A <= B) is False
    assert AorB > A and AorB > B
    assert AorB >= A and AorB >= B
    assert A >= A and A <= A
    assert A >= AandB and B >= AandB
    assert A > AandB and B > AandB


def test_powerset():
    """Power set of finite sets; infinite sets are unsupported."""
    # EmptySet
    A = FiniteSet()
    pset = A.powerset()
    assert len(pset) == 1
    assert pset == FiniteSet(S.EmptySet)

    # FiniteSets
    A = FiniteSet(1, 2)
    pset = A.powerset()
    assert len(pset) == 2**len(A)
    assert pset == FiniteSet(FiniteSet(), FiniteSet(1),
                             FiniteSet(2), A)
    # Not finite sets
    I = Interval(0, 1)
    raises(NotImplementedError, I.powerset)
def test_product_basic():
    """ProductSet membership, subset relations, complement and length."""
    H, T = 'H', 'T'
    unit_line = Interval(0, 1)
    d6 = FiniteSet(1, 2, 3, 4, 5, 6)
    d4 = FiniteSet(1, 2, 3, 4)
    coin = FiniteSet(H, T)

    square = unit_line * unit_line

    assert (0, 0) in square
    assert 0 not in square
    assert (H, T) in coin ** 2
    assert (.5, .5, .5) in square * unit_line
    assert (H, 3, 3) in coin * d6 * d6
    HH, TT = sympify(H), sympify(T)
    assert set(coin**2) == set(((HH, HH), (HH, TT), (TT, HH), (TT, TT)))

    assert (d4*d4).is_subset(d6*d6)

    assert square.complement(Interval(-oo, oo)*Interval(-oo, oo)) == Union(
        (Interval(-oo, 0, True, True) +
         Interval(1, oo, True, True))*Interval(-oo, oo),
        Interval(-oo, oo)*(Interval(-oo, 0, True, True) +
                           Interval(1, oo, True, True)))

    assert (Interval(-5, 5)**3).is_subset(Interval(-10, 10)**3)
    assert not (Interval(-10, 10)**3).is_subset(Interval(-5, 5)**3)
    assert not (Interval(-5, 5)**2).is_subset(Interval(-10, 10)**3)

    assert (Interval(.2, .5)*FiniteSet(.5)).is_subset(square)  # segment in square

    assert len(coin*coin*coin) == 8
    assert len(S.EmptySet*S.EmptySet) == 0
    assert len(S.EmptySet*coin) == 0
    raises(TypeError, lambda: len(coin*Interval(0, 2)))


def test_real():
    """is_subset(S.Reals) for real-valued sets and ones containing strings."""
    x = Symbol('x', real=True, finite=True)

    I = Interval(0, 5)
    J = Interval(10, 20)
    A = FiniteSet(1, 2, 30, x, S.Pi)
    B = FiniteSet(-4, 0)
    C = FiniteSet(100)
    D = FiniteSet('Ham', 'Eggs')

    assert all(s.is_subset(S.Reals) for s in [I, J, A, B, C])
    assert not D.is_subset(S.Reals)
    assert all((a + b).is_subset(S.Reals) for a in [I, J, A, B, C] for b in [I, J, A, B, C])
    assert not any((a + D).is_subset(S.Reals) for a in [I, J, A, B, C, D])

    assert not (I + A + D).is_subset(S.Reals)


def test_supinf():
    """sup/inf of unions and finite sets, symbolic via Max/Min."""
    x = Symbol('x', real=True)
    y = Symbol('y', real=True)

    assert (Interval(0, 1) + FiniteSet(2)).sup == 2
    assert (Interval(0, 1) + FiniteSet(2)).inf == 0
    assert (Interval(0, 1) + FiniteSet(x)).sup == Max(1, x)
    assert (Interval(0, 1) + FiniteSet(x)).inf == Min(0, x)
    assert FiniteSet(5, 1, x).sup == Max(5, x)
    assert FiniteSet(5, 1, x).inf == Min(1, x)
    assert FiniteSet(5, 1, x, y).sup == Max(5, x, y)
    assert FiniteSet(5, 1, x, y).inf == Min(1, x, y)
    assert FiniteSet(5, 1, x, y, S.Infinity, S.NegativeInfinity).sup == \
        S.Infinity
    assert FiniteSet(5, 1, x, y, S.Infinity, S.NegativeInfinity).inf == \
        S.NegativeInfinity
    assert FiniteSet('Ham', 'Eggs').sup == Max('Ham', 'Eggs')
def test_universalset():
    """UniversalSet absorbs unions, is identity for intersection."""
    U = S.UniversalSet
    x = Symbol('x')
    assert U.as_relational(x) is True
    assert U.union(Interval(2, 4)) == U

    assert U.intersect(Interval(2, 4)) == Interval(2, 4)
    assert U.measure == S.Infinity
    assert U.boundary == S.EmptySet
    assert U.contains(0) is S.true


def test_Union_of_ProductSets_shares():
    """A product over a superset absorbs a product over a subset."""
    line = Interval(0, 2)
    points = FiniteSet(0, 1, 2)
    assert Union(line * line, line * points) == line * line


def test_Interval_free_symbols():
    """Endpoints contribute their free symbols (issue 6211)."""
    assert Interval(0, 1).free_symbols == set()
    x = Symbol('x', real=True)
    assert Interval(0, x).free_symbols == set([x])
def test_image_interval():
    """imageset over intervals: monotone maps, extrema, discontinuities."""
    from sympy.core.numbers import Rational
    x = Symbol('x', real=True)
    a = Symbol('a', real=True)
    assert imageset(x, 2*x, Interval(-2, 1)) == Interval(-4, 2)
    assert imageset(x, 2*x, Interval(-2, 1, True, False)) == \
        Interval(-4, 2, True, False)
    assert imageset(x, x**2, Interval(-2, 1, True, False)) == \
        Interval(0, 4, False, True)
    assert imageset(x, x**2, Interval(-2, 1)) == Interval(0, 4)
    assert imageset(x, x**2, Interval(-2, 1, True, False)) == \
        Interval(0, 4, False, True)
    assert imageset(x, x**2, Interval(-2, 1, True, True)) == \
        Interval(0, 4, False, True)
    assert imageset(x, (x - 2)**2, Interval(1, 3)) == Interval(0, 1)
    assert imageset(x, 3*x**4 - 26*x**3 + 78*x**2 - 90*x, Interval(0, 4)) == \
        Interval(-35, 0)  # Multiple Maxima
    assert imageset(x, x + 1/x, Interval(-oo, oo)) == Interval(-oo, -2) \
        + Interval(2, oo)  # Single Infinite discontinuity
    assert imageset(x, 1/x + 1/(x-1)**2, Interval(0, 2, True, False)) == \
        Interval(Rational(3, 2), oo, False)  # Multiple Infinite discontinuities

    # Test for Python lambda
    assert imageset(lambda x: 2*x, Interval(-2, 1)) == Interval(-4, 2)

    # symbolic coefficients stay unevaluated
    assert imageset(Lambda(x, a*x), Interval(0, 1)) == \
        ImageSet(Lambda(x, a*x), Interval(0, 1))

    assert imageset(Lambda(x, sin(cos(x))), Interval(0, 1)) == \
        ImageSet(Lambda(x, sin(cos(x))), Interval(0, 1))


def test_image_piecewise():
    """imageset of Piecewise functions over an interval."""
    f = Piecewise((x, x <= -1), (1/x**2, x <= 5), (x**3, True))
    f1 = Piecewise((0, x <= 1), (1, x <= 2), (2, True))
    assert imageset(x, f, Interval(-5, 5)) == Union(Interval(-5, -1), Interval(S(1)/25, oo))
    assert imageset(x, f1, Interval(1, 2)) == FiniteSet(0, 1)


@XFAIL  # See: https://github.com/sympy/sympy/pull/2723#discussion_r8659826
def test_image_Intersection():
    """Known failure: image of an intersection of intervals."""
    x = Symbol('x', real=True)
    y = Symbol('y', real=True)
    assert imageset(x, x**2, Interval(-2, 0).intersect(Interval(x, y))) == \
        Interval(0, 4).intersect(Interval(Min(x**2, y**2), Max(x**2, y**2)))


def test_image_FiniteSet():
    """imageset maps each element of a FiniteSet."""
    x = Symbol('x', real=True)
    assert imageset(x, 2*x, FiniteSet(1, 2, 3)) == FiniteSet(2, 4, 6)


def test_image_Union():
    """imageset distributes over a Union."""
    x = Symbol('x', real=True)
    assert imageset(x, x**2, Interval(-2, 0) + FiniteSet(1, 2, 3)) == \
        (Interval(0, 4) + FiniteSet(9))


def test_image_EmptySet():
    """The image of the empty set is empty."""
    x = Symbol('x', real=True)
    assert imageset(x, 2*x, S.EmptySet) == S.EmptySet
def test_issue_5724_7680():
    """The imaginary unit is not a real number."""
    assert I not in S.Reals  # issue 7680
    assert Interval(-oo, oo).contains(I) is S.false


def test_boundary():
    """Topological boundary of points and intervals (independent of openness)."""
    x = Symbol('x', real=True)
    y = Symbol('y', real=True)
    assert FiniteSet(1).boundary == FiniteSet(1)
    assert all(Interval(0, 1, left_open, right_open).boundary == FiniteSet(0, 1)
               for left_open in (true, false) for right_open in (true, false))


def test_boundary_Union():
    """Boundary of unions; shared interior points are dropped."""
    assert (Interval(0, 1) + Interval(2, 3)).boundary == FiniteSet(0, 1, 2, 3)
    assert ((Interval(0, 1, False, True)
             + Interval(1, 2, True, False)).boundary == FiniteSet(0, 1, 2))

    assert (Interval(0, 1) + FiniteSet(2)).boundary == FiniteSet(0, 1, 2)
    assert Union(Interval(0, 10), Interval(5, 15), evaluate=False).boundary \
        == FiniteSet(0, 15)

    assert Union(Interval(0, 10), Interval(0, 1), evaluate=False).boundary \
        == FiniteSet(0, 10)
    assert Union(Interval(0, 10, True, True),
                 Interval(10, 15, True, True), evaluate=False).boundary \
        == FiniteSet(0, 10, 15)


@XFAIL
def test_union_boundary_of_joining_sets():
    """ Testing the boundary of unions is a hard problem """
    assert Union(Interval(0, 10), Interval(10, 15), evaluate=False).boundary \
        == FiniteSet(0, 15)


def test_boundary_ProductSet():
    """Boundary of 2-D products is the frame built from the factor boundaries."""
    open_square = Interval(0, 1, True, True) ** 2
    assert open_square.boundary == (FiniteSet(0, 1) * Interval(0, 1)
                                    + Interval(0, 1) * FiniteSet(0, 1))

    second_square = Interval(1, 2, True, True) * Interval(0, 1, True, True)
    assert (open_square + second_square).boundary == (
        FiniteSet(0, 1) * Interval(0, 1)
        + FiniteSet(1, 2) * Interval(0, 1)
        + Interval(0, 1) * FiniteSet(0, 1)
        + Interval(1, 2) * FiniteSet(0, 1))


def test_boundary_ProductSet_line():
    """A line embedded in R^2 is its own boundary."""
    line_in_r2 = Interval(0, 1) * FiniteSet(0)
    assert line_in_r2.boundary == line_in_r2
def test_is_open():
    """Only fully open intervals are open; finite sets never are."""
    assert not Interval(0, 1, False, False).is_open
    assert not Interval(0, 1, True, False).is_open
    assert Interval(0, 1, True, True).is_open
    assert not FiniteSet(1, 2, 3).is_open


def test_is_closed():
    """Closed intervals and finite sets are closed; half-open intervals are not."""
    assert Interval(0, 1, False, False).is_closed
    assert not Interval(0, 1, True, False).is_closed
    assert FiniteSet(1, 2, 3).is_closed


def test_closure():
    """Closure adds the missing endpoint of a half-open interval."""
    assert Interval(0, 1, False, True).closure == Interval(0, 1, False, False)


def test_interior():
    """Interior removes the closed endpoint of a half-open interval."""
    assert Interval(0, 1, False, True).interior == Interval(0, 1, True, True)


def test_issue_7841():
    """`in` with a plain Symbol against S.Reals raises TypeError."""
    raises(TypeError, lambda: x in S.Reals)
def test_Eq():
    """Structural equality of intervals, finite sets and products."""
    assert Eq(Interval(0, 1), Interval(0, 1))
    assert Eq(Interval(0, 1), Interval(0, 2)) == False

    s1 = FiniteSet(0, 1)
    s2 = FiniteSet(1, 2)

    assert Eq(s1, s1)
    assert Eq(s1, s2) == False

    assert Eq(s1*s2, s1*s2)
    assert Eq(s1*s2, s2*s1) == False


def test_SymmetricDifference():
    """Symmetric difference via the class, the ^ operator, and its definition."""
    assert SymmetricDifference(FiniteSet(0, 1, 2, 3, 4, 5), \
           FiniteSet(2, 4, 6, 8, 10)) == FiniteSet(0, 1, 3, 5, 6, 8, 10)
    assert SymmetricDifference(FiniteSet(2, 3, 4), FiniteSet(2, 3 ,4 ,5 )) \
           == FiniteSet(5)
    assert FiniteSet(1, 2, 3, 4, 5) ^ FiniteSet(1, 2, 5, 6) == \
           FiniteSet(3, 4, 6)
    assert Set(1, 2 ,3) ^ Set(2, 3, 4) == Union(Set(1, 2, 3) - Set(2, 3, 4), \
           Set(2, 3, 4) - Set(1, 2, 3))
    assert Interval(0, 4) ^ Interval(2, 5) == Union(Interval(0, 4) - \
           Interval(2, 5), Interval(2, 5) - Interval(0, 4))
def test_issue_9536():
    """Intersection with S.Reals stays unevaluated for a symbolic log."""
    from sympy.functions.elementary.exponential import log
    a = Symbol('a', real=True)
    assert FiniteSet(log(a)).intersect(S.Reals) == Intersection(S.Reals, FiniteSet(log(a)))


def test_issue_9637():
    """Complements involving an unknown symbol stay (partially) unevaluated."""
    n = Symbol('n')
    a = FiniteSet(n)
    b = FiniteSet(2, n)

    assert Complement(S.Reals, a) == Complement(S.Reals, a, evaluate=False)

    assert Complement(Interval(1, 3), a) == Complement(Interval(1, 3), a, evaluate=False)
    assert Complement(Interval(1, 3), b) == \
        Complement(Union(Interval(1, 2, False, True), Interval(2, 3, True, False)), a)

    assert Complement(a, S.Reals) == Complement(a, S.Reals, evaluate=False)
    assert Complement(a, Interval(1, 3)) == Complement(a, Interval(1, 3), evaluate=False)


def test_issue_9808():
    """Complement simplifies known members, keeps symbolic ones unevaluated."""
    assert Complement(FiniteSet(y), FiniteSet(1)) == Complement(FiniteSet(y), FiniteSet(1), evaluate=False)
    assert Complement(FiniteSet(1, 2, x), FiniteSet(x, y, 2, 3)) == \
        Complement(FiniteSet(1), FiniteSet(y), evaluate=False)


def test_issue_9956():
    """R absorbs any finite real point."""
    assert Union(Interval(-oo, oo), FiniteSet(1)) == Interval(-oo, oo)
    assert Interval(-oo, oo).contains(1) is S.true


def test_issue_Symbol_inter():
    """Intersections mixing numbers, symbols and matrices simplify partially."""
    i = Interval(0, oo)
    r = S.Reals
    mat = Matrix([0, 0, 0])
    assert Intersection(r, i, FiniteSet(m), FiniteSet(m, n)) == \
        Intersection(i, FiniteSet(m))
    assert Intersection(FiniteSet(1, m, n), FiniteSet(m, n, 2), i) == \
        Intersection(i, FiniteSet(m, n))
    assert Intersection(FiniteSet(m, n, x), FiniteSet(m, z), r) == \
        Intersection(r, FiniteSet(m, z), FiniteSet(n, x))
    assert Intersection(FiniteSet(m, n, 3), FiniteSet(m, n, x), r) == \
        Intersection(r, FiniteSet(x), FiniteSet(3, m, n), evaluate=False)
    assert Intersection(FiniteSet(m, n, 3), FiniteSet(m, n, 2, 3), r) == \
        Union(FiniteSet(3), Intersection(r, FiniteSet(m, n)))
    assert Intersection(r, FiniteSet(mat, 2, n), FiniteSet(0, mat, n)) == \
        Intersection(r, FiniteSet(n))
    assert Intersection(FiniteSet(sin(x), cos(x)), FiniteSet(sin(x), cos(x), 1), r) == \
        Intersection(r, FiniteSet(sin(x), cos(x)))
    assert Intersection(FiniteSet(x**2, 1, sin(x)), FiniteSet(x**2, 2, sin(x)), r) == \
        Intersection(r, FiniteSet(x**2, sin(x)))


def test_issue_10113():
    """imageset of a rational function with poles at +-2."""
    f = x**2/(x**2 - 4)
    assert imageset(x, f, S.Reals) == Union(Interval(-oo, 0), Interval(1, oo, True, True))
    assert imageset(x, f, Interval(-2, 2)) == Interval(-oo, 0)
    assert imageset(x, f, Interval(-2, 3)) == Union(Interval(-oo, 0), Interval(S(9)/5, oo))
| |
import sys
from mypyli import taxstring, taxtree
class TaxNode(object):
    """
    Node of an in-memory taxonomy tree.

    Every constructed node is registered in the class-level pool
    ``taxNodeCollection`` keyed by taxid, so parents can be resolved lazily
    by id. Missing parents are fetched via the (slow) taxstring lookup.
    """

    # shared registry: taxid (str) -> TaxNode
    taxNodeCollection = {}

    @classmethod
    def get_node(cls, id):
        """Return the registered node for this taxid, or None."""
        return cls.taxNodeCollection.get(id, None)

    @classmethod
    def add_node(cls, id, taxnode):
        """Register (or overwrite) the node for this taxid."""
        cls.taxNodeCollection[id] = taxnode

    @classmethod
    def get_all_nodes(cls):
        """Return the whole taxid -> TaxNode registry."""
        return cls.taxNodeCollection

    @classmethod
    def create_node_from_id(cls, id):
        """
        Uses the taxstring module to create a full TaxNode from only a taxid.
        The lookup is slow so this is best used as a supplement to fill in gaps.
        """
        tax = taxstring.TaxString(id, is_id=True, lookup=True)
        rank = tax.get_lowest_rank()
        name = tax.get_tax_at_rank(rank)
        parent = tax.get_parent_id()

        cls(id, name, rank, parent)

    def __init__(self, id, name="", rank="", parent=""):
        """Create and register a node; taxid "1" is hard-wired as the root."""
        # check if this node has already been created
        if self.get_node(id):
            print("Warning: attempting to add duplicate node. Ignoring attempt.")
        else:   # add a new node
            self.id = id
            if self.id == "1":
                self.name = "root"
                self.parent = None
                self.rank = "root"
            else:
                self.name = name
                self.rank = rank
                self.parent = parent

            self.add_node(id, self)

    def get_parent_node(self):
        """Return the parent TaxNode, looking it up remotely if unregistered."""
        if self.parent:
            if self.get_node(self.parent):
                return self.get_node(self.parent)
            else:   # we have a taxid but no parent entry so make a new entry
                print("Looking up parent {}".format(self.parent))
                self.create_node_from_id(self.parent)
                return self.get_node(self.parent)
        else:
            raise AssertionError("Node has no parent.")

    def get_name(self):
        return self.name

    def set_name(self, name):
        self.name = name

    def get_rank(self):
        return self.rank

    def get_full_taxonomy(self, taxonomy=""):
        """Return a {rank: name} dict for this node and all its ancestors."""
        if not taxonomy:
            taxonomy = {}

        if self.parent:
            parent = self.get_parent_node()
        else:
            parent = None

        rank = self.get_rank()

        if parent:
            taxonomy = parent.get_full_taxonomy(taxonomy)

        if not self.get_name():
            print("looking up {}".format(self.id))
            taxstr = taxstring.TaxString(tax=self.id, is_id=True, lookup=True)
            name = taxstr.get_tax_at_rank(taxstr.get_lowest_rank(), suppress=True)
            print(name)
            self.set_name(name)

        if rank:
            # NCBI calls the top level "superkingdom"; normalize to "kingdom"
            if rank == "superkingdom":
                rank = "kingdom"
            taxonomy[rank] = self.get_name()

        return taxonomy

    def get_full_lineage(self, lineage=None):
        """ Returns a list of the nodes in the lineage (ending with the lowest) """
        # BUG FIX: default was a mutable list literal (shared across calls)
        if lineage is None:
            lineage = []

        if self.parent:
            parent = self.get_parent_node()
        else:
            parent = None

        if not self.get_name():
            print("looking up {}".format(self.id))
            taxstr = taxstring.TaxString(tax=self.id, is_id=True, lookup=True)
            name = taxstr.get_tax_at_rank(taxstr.get_lowest_rank(), suppress=True)
            print(name)
            self.set_name(name)

        lineage = [self] + lineage
        if parent:
            # BUG FIX: the recursive result was previously discarded, so the
            # caller only ever got [self]; propagate the ancestor list instead.
            return parent.get_full_lineage(lineage)
        return lineage
def get_names_for_ids(names_f):
    """Parse a names.dmp-style file into ``{taxid: {'name': scientific_name}}``.

    Only rows flagged "scientific name" in the last column are kept.
    """
    data_dict = {}
    with open(names_f, 'r') as IN:
        for line in IN:
            fields = line.split("\t|\t")
            # strip the "\t|\n" row terminator off the final column
            fields[-1] = fields[-1].rstrip("\t|\n")
            if fields[-1] != "scientific name":
                continue
            data_dict[fields[0]] = {'name': fields[1]}
    return data_dict
def get_parents_for_ids(nodes_f, data_dict):
    """Annotate each taxid entry in data_dict with its 'parent' and 'rank'
    read from a nodes.dmp-style file (mutates data_dict in place).
    """
    with open(nodes_f, 'r') as IN:
        for line in IN:
            fields = line.split("\t|\t")
            # strip the "\t|\n" row terminator off the final column
            fields[-1] = fields[-1].rstrip("\t|\n")

            taxid, parent, rank = fields[0], fields[1], fields[2]
            if not parent:
                print("Warning: taxid {} doesn't have parent".format(taxid))

            data_dict[taxid].update({'parent': parent, 'rank': rank})
def add_nodes_recurs(to_add, parent2child, data_dict, tree):
    """Attach the children of every taxid in ``to_add`` to ``tree``, then
    recurse on the newly added layer (breadth-first, one level per call).
    """
    frontier = []
    for parent_id in to_add:
        pnode = tree.lookup_taxid(parent_id)
        for child_id in parent2child.get(parent_id, []):
            entry = data_dict.get(child_id, None)
            cnode = taxtree.TaxNode(child_id, name=entry['name'], rank=entry['rank'], parent=pnode)
            pnode.add_child(cnode)
            frontier.append(child_id)

    print("Processed {} nodes.".format(len(tree.taxnodes)))

    if frontier:
        tree = add_nodes_recurs(frontier, parent2child, data_dict, tree)
    return tree
def build_tree(data_dict):
    """Construct a taxtree.TaxTree from the parsed names/nodes data and
    return it, rooted at "cellular organisms" (taxid 131567).
    """
    tree = taxtree.TaxTree()
    taxtree.TaxNode.set_default_tree(tree)

    # invert the child -> parent mapping into parent -> [children]
    parent2child = {}
    for taxid, data in data_dict.items():
        parent2child.setdefault(data['parent'], []).append(taxid)

    # add the root node
    taxtree.TaxNode(taxid="131567", name="cellular organisms", rank="root", parent=None)

    print("Adding nodes...", file=sys.stderr)
    return add_nodes_recurs(["131567"], parent2child, data_dict, tree)
if __name__ == "__main__":
    # argv[1]: names.dmp-style file, argv[2]: nodes.dmp-style file
    data_dict = get_names_for_ids(sys.argv[1])
    get_parents_for_ids(sys.argv[2], data_dict)

    tree = build_tree(data_dict)
    tree.save_tree("tree.pickle")
| |
from plotly.basedatatypes import BaseTraceType as _BaseTraceType
import copy as _copy
class Parcoords(_BaseTraceType):

    # class properties
    # --------------------
    _parent_path_str = ""
    _path_str = "parcoords"
    _valid_props = {
        "customdata",
        "customdatasrc",
        "dimensiondefaults",
        "dimensions",
        "domain",
        "ids",
        "idssrc",
        "labelangle",
        "labelfont",
        "labelside",
        "line",
        "meta",
        "metasrc",
        "name",
        "rangefont",
        "stream",
        "tickfont",
        "type",
        "uid",
        "uirevision",
        "visible",
    }
# customdata
# ----------
@property
def customdata(self):
"""
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note that,
"scatter" traces also appends customdata items in the markers
DOM elements
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for customdata
.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# dimensions
# ----------
@property
def dimensions(self):
"""
The dimensions (variables) of the parallel coordinates chart.
2..60 dimensions are supported.
The 'dimensions' property is a tuple of instances of
Dimension that may be specified as:
- A list or tuple of instances of plotly.graph_objs.parcoords.Dimension
- A list or tuple of dicts of string/value properties that
will be passed to the Dimension constructor
Supported dict properties:
constraintrange
The domain range to which the filter on the
dimension is constrained. Must be an array of
`[fromValue, toValue]` with `fromValue <=
toValue`, or if `multiselect` is not disabled,
you may give an array of arrays, where each
inner array is `[fromValue, toValue]`.
label
The shown name of the dimension.
multiselect
Do we allow multiple selection ranges or just a
single range?
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
range
The domain range that represents the full,
shown axis extent. Defaults to the `values`
extent. Must be an array of `[fromValue,
toValue]` with finite numbers as elements.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format
We add one item to d3's date formatter: "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
ticktext
Sets the text displayed at the ticks position
via `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for ticktext .
tickvals
Sets the values at which ticks on this axis
appear.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for tickvals .
values
Dimension values. `values[n]` represents the
value of the `n`th point in the dataset,
therefore the `values` vector for all
dimensions must be the same (longer vectors
will be truncated). Each value must be a finite
number.
valuessrc
Sets the source reference on Chart Studio Cloud
for values .
visible
Shows the dimension when set to `true` (the
default). Hides the dimension for `false`.
Returns
-------
tuple[plotly.graph_objs.parcoords.Dimension]
"""
return self["dimensions"]
@dimensions.setter
def dimensions(self, val):
self["dimensions"] = val
# dimensiondefaults
# -----------------
@property
def dimensiondefaults(self):
"""
When used in a template (as
layout.template.data.parcoords.dimensiondefaults), sets the
default property values to use for elements of
parcoords.dimensions
The 'dimensiondefaults' property is an instance of Dimension
that may be specified as:
- An instance of :class:`plotly.graph_objs.parcoords.Dimension`
- A dict of string/value properties that will be passed
to the Dimension constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.parcoords.Dimension
"""
return self["dimensiondefaults"]
@dimensiondefaults.setter
def dimensiondefaults(self, val):
self["dimensiondefaults"] = val
# domain
# ------
@property
def domain(self):
"""
The 'domain' property is an instance of Domain
that may be specified as:
- An instance of :class:`plotly.graph_objs.parcoords.Domain`
- A dict of string/value properties that will be passed
to the Domain constructor
Supported dict properties:
column
If there is a layout grid, use the domain for
this column in the grid for this parcoords
trace .
row
If there is a layout grid, use the domain for
this row in the grid for this parcoords trace .
x
Sets the horizontal domain of this parcoords
trace (in plot fraction).
y
Sets the vertical domain of this parcoords
trace (in plot fraction).
Returns
-------
plotly.graph_objs.parcoords.Domain
"""
return self["domain"]
@domain.setter
def domain(self, val):
self["domain"] = val
# ids
# ---
@property
def ids(self):
"""
Assigns id labels to each datum. These ids for object constancy
of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for ids .
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
# labelangle
# ----------
@property
def labelangle(self):
"""
Sets the angle of the labels with respect to the horizontal.
For example, a `tickangle` of -90 draws the labels vertically.
Tilted labels with "labelangle" may be positioned better inside
margins when `labelposition` is set to "bottom".
The 'labelangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["labelangle"]
@labelangle.setter
def labelangle(self, val):
self["labelangle"] = val
    # labelfont
    # ---------
    @property
    def labelfont(self):
        """
        Sets the font for the `dimension` labels.

        The 'labelfont' property is an instance of Labelfont
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.parcoords.Labelfont`
          - A dict of string/value properties that will be passed
            to the Labelfont constructor

            Supported dict properties:

                color
                family
                    HTML font family - the typeface that will be
                    applied by the web browser. The web browser
                    will only be able to apply a font if it is
                    available on the system which it operates.
                    Provide multiple font families, separated by
                    commas, to indicate the preference in which to
                    apply fonts if they aren't available on the
                    system. The Chart Studio Cloud (at
                    https://chart-studio.plotly.com or on-premise)
                    generates images on a server, where only a
                    select number of fonts are installed and
                    supported. These include "Arial", "Balto",
                    "Courier New", "Droid Sans", "Droid Serif",
                    "Droid Sans Mono", "Gravitas One", "Old
                    Standard TT", "Open Sans", "Overpass", "PT Sans
                    Narrow", "Raleway", "Times New Roman".
                size

        Returns
        -------
        plotly.graph_objs.parcoords.Labelfont
        """
        return self["labelfont"]

    @labelfont.setter
    def labelfont(self, val):
        self["labelfont"] = val
    # labelside
    # ---------
    @property
    def labelside(self):
        """
        Specifies the location of the `label`. "top" positions labels
        above, next to the title. "bottom" positions labels below the
        graph. Tilted labels with "labelangle" may be positioned better
        inside margins when `labelposition` is set to "bottom".

        The 'labelside' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['top', 'bottom']

        Returns
        -------
        Any
        """
        return self["labelside"]

    @labelside.setter
    def labelside(self, val):
        self["labelside"] = val
    # line
    # ----
    @property
    def line(self):
        """
        The 'line' property is an instance of Line
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.parcoords.Line`
          - A dict of string/value properties that will be passed
            to the Line constructor

            Supported dict properties:

                autocolorscale
                    Determines whether the colorscale is a default
                    palette (`autocolorscale: true`) or the palette
                    determined by `line.colorscale`. Has an effect
                    only if `line.color` is set to a numerical
                    array. In case `colorscale` is unspecified or
                    `autocolorscale` is true, the default palette
                    will be chosen according to whether numbers in
                    the `color` array are all positive, all
                    negative or mixed.
                cauto
                    Determines whether or not the color domain is
                    computed with respect to the input data (here
                    in `line.color`) or the bounds set in
                    `line.cmin` and `line.cmax` Has an effect only
                    if `line.color` is set to a numerical array.
                    Defaults to `false` when `line.cmin` and
                    `line.cmax` are set by the user.
                cmax
                    Sets the upper bound of the color domain. Has
                    an effect only if `line.color` is set to a
                    numerical array. Value should have the same
                    units as in `line.color` and if set,
                    `line.cmin` must be set as well.
                cmid
                    Sets the mid-point of the color domain by
                    scaling `line.cmin` and/or `line.cmax` to be
                    equidistant to this point. Has an effect only
                    if `line.color` is set to a numerical array.
                    Value should have the same units as in
                    `line.color`. Has no effect when `line.cauto`
                    is `false`.
                cmin
                    Sets the lower bound of the color domain. Has
                    an effect only if `line.color` is set to a
                    numerical array. Value should have the same
                    units as in `line.color` and if set,
                    `line.cmax` must be set as well.
                color
                    Sets the line color. It accepts either a specific
                    color or an array of numbers that are mapped to
                    the colorscale relative to the max and min
                    values of the array or relative to `line.cmin`
                    and `line.cmax` if set.
                coloraxis
                    Sets a reference to a shared color axis.
                    References to these shared color axes are
                    "coloraxis", "coloraxis2", "coloraxis3", etc.
                    Settings for these shared color axes are set in
                    the layout, under `layout.coloraxis`,
                    `layout.coloraxis2`, etc. Note that multiple
                    color scales can be linked to the same color
                    axis.
                colorbar
                    :class:`plotly.graph_objects.parcoords.line.Col
                    orBar` instance or dict with compatible
                    properties
                colorscale
                    Sets the colorscale. Has an effect only if
                    `line.color` is set to a numerical array. The
                    colorscale must be an array containing arrays
                    mapping a normalized value to an rgb, rgba,
                    hex, hsl, hsv, or named color string. At
                    minimum, a mapping for the lowest (0) and
                    highest (1) values are required. For example,
                    `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
                    To control the bounds of the colorscale in
                    color space, use `line.cmin` and `line.cmax`.
                    Alternatively, `colorscale` may be a palette
                    name string of the following list: Greys,YlGnBu
                    ,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,Picnic,R
                    ainbow,Portland,Jet,Hot,Blackbody,Earth,Electri
                    c,Viridis,Cividis.
                colorsrc
                    Sets the source reference on Chart Studio Cloud
                    for color.
                reversescale
                    Reverses the color mapping if true. Has an
                    effect only if `line.color` is set to a
                    numerical array. If true, `line.cmin` will
                    correspond to the last color in the array and
                    `line.cmax` will correspond to the first color.
                showscale
                    Determines whether or not a colorbar is
                    displayed for this trace. Has an effect only if
                    `line.color` is set to a numerical array.

        Returns
        -------
        plotly.graph_objs.parcoords.Line
        """
        return self["line"]

    @line.setter
    def line(self, val):
        self["line"] = val
    # meta
    # ----
    @property
    def meta(self):
        """
        Assigns extra meta information associated with this trace that
        can be used in various text attributes. Attributes such as
        trace `name`, graph, axis and colorbar `title.text`, annotation
        `text`, `rangeselector`, `updatemenus` and `sliders` `label`
        text all support `meta`. To access the trace `meta` values in
        an attribute in the same trace, simply use `%{meta[i]}` where
        `i` is the index or key of the `meta` item in question. To
        access trace `meta` in layout attributes, use
        `%{data[n].meta[i]}` where `i` is the index or key of the
        `meta` and `n` is the trace index.

        The 'meta' property accepts values of any type

        Returns
        -------
        Any|numpy.ndarray
        """
        return self["meta"]

    @meta.setter
    def meta(self, val):
        self["meta"] = val
    # metasrc
    # -------
    @property
    def metasrc(self):
        """
        Sets the source reference on Chart Studio Cloud for meta.

        The 'metasrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["metasrc"]

    @metasrc.setter
    def metasrc(self, val):
        self["metasrc"] = val
    # name
    # ----
    @property
    def name(self):
        """
        Sets the trace name. The trace name appears as the legend item
        and on hover.

        The 'name' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["name"]

    @name.setter
    def name(self, val):
        self["name"] = val
    # rangefont
    # ---------
    @property
    def rangefont(self):
        """
        Sets the font for the `dimension` range values.

        The 'rangefont' property is an instance of Rangefont
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.parcoords.Rangefont`
          - A dict of string/value properties that will be passed
            to the Rangefont constructor

            Supported dict properties:

                color
                family
                    HTML font family - the typeface that will be
                    applied by the web browser. The web browser
                    will only be able to apply a font if it is
                    available on the system which it operates.
                    Provide multiple font families, separated by
                    commas, to indicate the preference in which to
                    apply fonts if they aren't available on the
                    system. The Chart Studio Cloud (at
                    https://chart-studio.plotly.com or on-premise)
                    generates images on a server, where only a
                    select number of fonts are installed and
                    supported. These include "Arial", "Balto",
                    "Courier New", "Droid Sans", "Droid Serif",
                    "Droid Sans Mono", "Gravitas One", "Old
                    Standard TT", "Open Sans", "Overpass", "PT Sans
                    Narrow", "Raleway", "Times New Roman".
                size

        Returns
        -------
        plotly.graph_objs.parcoords.Rangefont
        """
        return self["rangefont"]

    @rangefont.setter
    def rangefont(self, val):
        self["rangefont"] = val
    # stream
    # ------
    @property
    def stream(self):
        """
        The 'stream' property is an instance of Stream
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.parcoords.Stream`
          - A dict of string/value properties that will be passed
            to the Stream constructor

            Supported dict properties:

                maxpoints
                    Sets the maximum number of points to keep on
                    the plots from an incoming stream. If
                    `maxpoints` is set to 50, only the newest 50
                    points will be displayed on the plot.
                token
                    The stream id number links a data trace on a
                    plot with a stream. See https://chart-
                    studio.plotly.com/settings for more details.

        Returns
        -------
        plotly.graph_objs.parcoords.Stream
        """
        # Mapping-style access to the 'stream' property.
        return self["stream"]

    @stream.setter
    def stream(self, val):
        self["stream"] = val
    # tickfont
    # --------
    @property
    def tickfont(self):
        """
        Sets the font for the `dimension` tick values.

        The 'tickfont' property is an instance of Tickfont
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.parcoords.Tickfont`
          - A dict of string/value properties that will be passed
            to the Tickfont constructor

            Supported dict properties:

                color
                family
                    HTML font family - the typeface that will be
                    applied by the web browser. The web browser
                    will only be able to apply a font if it is
                    available on the system which it operates.
                    Provide multiple font families, separated by
                    commas, to indicate the preference in which to
                    apply fonts if they aren't available on the
                    system. The Chart Studio Cloud (at
                    https://chart-studio.plotly.com or on-premise)
                    generates images on a server, where only a
                    select number of fonts are installed and
                    supported. These include "Arial", "Balto",
                    "Courier New", "Droid Sans", "Droid Serif",
                    "Droid Sans Mono", "Gravitas One", "Old
                    Standard TT", "Open Sans", "Overpass", "PT Sans
                    Narrow", "Raleway", "Times New Roman".
                size

        Returns
        -------
        plotly.graph_objs.parcoords.Tickfont
        """
        return self["tickfont"]

    @tickfont.setter
    def tickfont(self, val):
        self["tickfont"] = val
    # uid
    # ---
    @property
    def uid(self):
        """
        Assign an id to this trace. Use this to provide object
        constancy between traces during animations and transitions.

        The 'uid' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["uid"]

    @uid.setter
    def uid(self, val):
        self["uid"] = val
    # uirevision
    # ----------
    @property
    def uirevision(self):
        """
        Controls persistence of some user-driven changes to the trace:
        `constraintrange` in `parcoords` traces, as well as some
        `editable: true` modifications such as `name` and
        `colorbar.title`. Defaults to `layout.uirevision`. Note that
        other user-driven trace attribute changes are controlled by
        `layout` attributes: `trace.visible` is controlled by
        `layout.legend.uirevision`, `selectedpoints` is controlled by
        `layout.selectionrevision`, and `colorbar.(x|y)` (accessible
        with `config: {editable: true}`) is controlled by
        `layout.editrevision`. Trace changes are tracked by `uid`,
        which only falls back on trace index if no `uid` is provided.
        So if your app can add/remove traces before the end of the
        `data` array, such that the same trace has a different index,
        you can still preserve user-driven changes if you give each
        trace a `uid` that stays with it as it moves.

        The 'uirevision' property accepts values of any type

        Returns
        -------
        Any
        """
        # uirevision accepts any type; no validation beyond storage is
        # visible in this accessor.
        return self["uirevision"]

    @uirevision.setter
    def uirevision(self, val):
        self["uirevision"] = val
    # visible
    # -------
    @property
    def visible(self):
        """
        Determines whether or not this trace is visible. If
        "legendonly", the trace is not drawn, but can appear as a
        legend item (provided that the legend itself is visible).

        The 'visible' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                [True, False, 'legendonly']

        Returns
        -------
        Any
        """
        # One of True, False, or "legendonly" (see enumeration above).
        return self["visible"]

    @visible.setter
    def visible(self, val):
        self["visible"] = val
    # type
    # ----
    @property
    def type(self):
        """
        Read-only trace type literal.

        Returns
        -------
        str
            Always "parcoords" (set once in ``__init__``).
        """
        return self._props["type"]
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # Plain-text summaries of every Parcoords property, consumed by
        # plotly's help/validation machinery. The string literal below is
        # runtime data and is therefore reproduced verbatim.
        return """\
        customdata
            Assigns extra data each datum. This may be useful when
            listening to hover, click and selection events. Note
            that, "scatter" traces also appends customdata items in
            the markers DOM elements
        customdatasrc
            Sets the source reference on Chart Studio Cloud for
            customdata .
        dimensions
            The dimensions (variables) of the parallel coordinates
            chart. 2..60 dimensions are supported.
        dimensiondefaults
            When used in a template (as
            layout.template.data.parcoords.dimensiondefaults), sets
            the default property values to use for elements of
            parcoords.dimensions
        domain
            :class:`plotly.graph_objects.parcoords.Domain` instance
            or dict with compatible properties
        ids
            Assigns id labels to each datum. These ids for object
            constancy of data points during animation. Should be an
            array of strings, not numbers or any other type.
        idssrc
            Sets the source reference on Chart Studio Cloud for
            ids .
        labelangle
            Sets the angle of the labels with respect to the
            horizontal. For example, a `tickangle` of -90 draws the
            labels vertically. Tilted labels with "labelangle" may
            be positioned better inside margins when
            `labelposition` is set to "bottom".
        labelfont
            Sets the font for the `dimension` labels.
        labelside
            Specifies the location of the `label`. "top" positions
            labels above, next to the title "bottom" positions
            labels below the graph Tilted labels with "labelangle"
            may be positioned better inside margins when
            `labelposition` is set to "bottom".
        line
            :class:`plotly.graph_objects.parcoords.Line` instance
            or dict with compatible properties
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`
            `rangeselector`, `updatemenues` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n[.meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            meta .
        name
            Sets the trace name. The trace name appear as the
            legend item and on hover.
        rangefont
            Sets the font for the `dimension` range values.
        stream
            :class:`plotly.graph_objects.parcoords.Stream` instance
            or dict with compatible properties
        tickfont
            Sets the font for the `dimension` tick values.
        uid
            Assign an id to this trace, Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        """
def __init__(
self,
arg=None,
customdata=None,
customdatasrc=None,
dimensions=None,
dimensiondefaults=None,
domain=None,
ids=None,
idssrc=None,
labelangle=None,
labelfont=None,
labelside=None,
line=None,
meta=None,
metasrc=None,
name=None,
rangefont=None,
stream=None,
tickfont=None,
uid=None,
uirevision=None,
visible=None,
**kwargs
):
"""
Construct a new Parcoords object
Parallel coordinates for multidimensional exploratory data
analysis. The samples are specified in `dimensions`. The colors
are set in `line.color`.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Parcoords`
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
dimensions
The dimensions (variables) of the parallel coordinates
chart. 2..60 dimensions are supported.
dimensiondefaults
When used in a template (as
layout.template.data.parcoords.dimensiondefaults), sets
the default property values to use for elements of
parcoords.dimensions
domain
:class:`plotly.graph_objects.parcoords.Domain` instance
or dict with compatible properties
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
labelangle
Sets the angle of the labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
labels vertically. Tilted labels with "labelangle" may
be positioned better inside margins when
`labelposition` is set to "bottom".
labelfont
Sets the font for the `dimension` labels.
labelside
Specifies the location of the `label`. "top" positions
labels above, next to the title "bottom" positions
labels below the graph Tilted labels with "labelangle"
may be positioned better inside margins when
`labelposition` is set to "bottom".
line
:class:`plotly.graph_objects.parcoords.Line` instance
or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
Sets the trace name. The trace name appear as the
legend item and on hover.
rangefont
Sets the font for the `dimension` range values.
stream
:class:`plotly.graph_objects.parcoords.Stream` instance
or dict with compatible properties
tickfont
Sets the font for the `dimension` tick values.
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
Returns
-------
Parcoords
"""
super(Parcoords, self).__init__("parcoords")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Parcoords
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Parcoords`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("customdata", None)
_v = customdata if customdata is not None else _v
if _v is not None:
self["customdata"] = _v
_v = arg.pop("customdatasrc", None)
_v = customdatasrc if customdatasrc is not None else _v
if _v is not None:
self["customdatasrc"] = _v
_v = arg.pop("dimensions", None)
_v = dimensions if dimensions is not None else _v
if _v is not None:
self["dimensions"] = _v
_v = arg.pop("dimensiondefaults", None)
_v = dimensiondefaults if dimensiondefaults is not None else _v
if _v is not None:
self["dimensiondefaults"] = _v
_v = arg.pop("domain", None)
_v = domain if domain is not None else _v
if _v is not None:
self["domain"] = _v
_v = arg.pop("ids", None)
_v = ids if ids is not None else _v
if _v is not None:
self["ids"] = _v
_v = arg.pop("idssrc", None)
_v = idssrc if idssrc is not None else _v
if _v is not None:
self["idssrc"] = _v
_v = arg.pop("labelangle", None)
_v = labelangle if labelangle is not None else _v
if _v is not None:
self["labelangle"] = _v
_v = arg.pop("labelfont", None)
_v = labelfont if labelfont is not None else _v
if _v is not None:
self["labelfont"] = _v
_v = arg.pop("labelside", None)
_v = labelside if labelside is not None else _v
if _v is not None:
self["labelside"] = _v
_v = arg.pop("line", None)
_v = line if line is not None else _v
if _v is not None:
self["line"] = _v
_v = arg.pop("meta", None)
_v = meta if meta is not None else _v
if _v is not None:
self["meta"] = _v
_v = arg.pop("metasrc", None)
_v = metasrc if metasrc is not None else _v
if _v is not None:
self["metasrc"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("rangefont", None)
_v = rangefont if rangefont is not None else _v
if _v is not None:
self["rangefont"] = _v
_v = arg.pop("stream", None)
_v = stream if stream is not None else _v
if _v is not None:
self["stream"] = _v
_v = arg.pop("tickfont", None)
_v = tickfont if tickfont is not None else _v
if _v is not None:
self["tickfont"] = _v
_v = arg.pop("uid", None)
_v = uid if uid is not None else _v
if _v is not None:
self["uid"] = _v
_v = arg.pop("uirevision", None)
_v = uirevision if uirevision is not None else _v
if _v is not None:
self["uirevision"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
# Read-only literals
# ------------------
self._props["type"] = "parcoords"
arg.pop("type", None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
import xml.etree.ElementTree
import fractions
import os
import collections
from collections import defaultdict
import fractions
import midi_to_statematrix
import math
# MIDI pitch range covered by the statematrix representation:
# note 24 (C1) up to, but not including, note 102.
lowerBound = 24
upperBound = 102
numPitches = upperBound - lowerBound  # size of the pitch axis (78)
#get the "divisions" which is the number of time
#units per beat
def getDivisions(e):
    """Collect the per-part <divisions> values from a MusicXML tree.

    Walks every <part>/<measure>/<attributes>/<divisions> chain under
    the root element and records the number of divisions (XML time
    units) per beat declared for each part.

    Args:
        e: root element of a parsed MusicXML document.

    Returns:
        dict mapping part id (the <part> 'id' attribute) to its integer
        divisions value. If a part declares <divisions> more than once,
        the last value seen wins.
    """
    retval = {}
    for part in e:
        if part.tag != 'part':
            continue
        partName = part.attrib['id']
        for measure in part:
            if measure.tag != 'measure':
                continue
            for attributes in measure:
                if attributes.tag != 'attributes':
                    continue
                for divisions in attributes:
                    if divisions.tag == 'divisions':
                        retval[partName] = int(divisions.text)
    return retval
#if it's a rest, return the
#duration, otherwise return none
def getRestLength(note):
    """Return the duration of *note* if it is a rest, otherwise None.

    Args:
        note: a MusicXML <note> element.

    Returns:
        int duration (in divisions) when the note has a <rest> child;
        None for pitched notes and for malformed rests with no
        <duration> (a warning is printed in that case).
    """
    duration = None
    isRest = False
    for el in note:
        if el.tag == 'rest':
            isRest = True
        elif el.tag == 'duration':
            if duration is None:
                duration = int(el.text)
            else:
                # malformed input: more than one <duration> child
                # (print() form is valid in both Python 2 and 3)
                print("Duration tag found twice for note...")
    if not isRest:
        # it's not a rest; return None
        return None
    if duration is None:
        # problem...
        print("Rest with no duration found")
        return None
    return duration
#return the duration for a backup element
def getBackupLength(backup):
    """Return the <duration> of a MusicXML <backup> element.

    Args:
        backup: a MusicXML <backup> element.

    Returns:
        int duration in divisions, or None if no <duration> child exists.
    """
    duration = None
    for el in backup:
        if el.tag == 'duration':
            if duration is None:
                duration = int(el.text)
            else:
                # malformed input: more than one <duration> child
                # (print() form is valid in both Python 2 and 3)
                print("Duration tag found twice for note...")
    return duration
def xmlPitchToMidiPitch(letter, octave, alter):
    """Convert a MusicXML pitch (step letter, octave, alter) to MIDI.

    MIDI pitch = 12 + semitone offset of *letter* + 12 * octave + alter,
    so C4 (middle C) maps to 60.

    Raises:
        KeyError: if *letter* is not one of A-G (a warning is printed
        first).
    """
    table = {
        "C": 0,
        "D": 2,
        "E": 4,
        "F": 5,
        "G": 7,
        "A": 9,
        "B": 11,
    }
    if letter not in table:
        # (print() form is valid in both Python 2 and 3)
        print("Letter {0} is not a valid letter A-G".format(letter))
    return 12 + table[letter] + 12 * octave + alter

#get pitch, and duration for a note
def getNoteInfo(note, measureNum):
    """Extract pitch/duration information from a MusicXML <note>.

    Args:
        note: a MusicXML <note> element.
        measureNum: index of the enclosing measure (currently unused;
            kept for interface compatibility).

    Returns:
        Tuple (midiPitch, duration, isChord, tieType) where tieType is
        the 'type' attribute of a <tie> child or None. Returns None for
        rests and for notes with no <duration> (e.g. grace notes).
    """
    duration = None
    step = None
    octave = None
    alter = None
    isRest = False
    isChord = False
    tieType = None
    for el in note:
        if el.tag == 'rest':
            isRest = True
        elif el.tag == 'duration':
            if duration is None:
                duration = int(el.text)
            else:
                print("Duration tag found twice for note...")
        elif el.tag == 'chord':
            isChord = True
        elif el.tag == 'tie':
            tieType = el.attrib['type']
        elif el.tag == 'pitch':
            for pitchel in el:
                if pitchel.tag == 'step':
                    if step is None:
                        step = pitchel.text
                    else:
                        print("step tag found twice for note...")
                elif pitchel.tag == 'octave':
                    if octave is None:
                        octave = int(pitchel.text)
                    else:
                        print("octave tag found twice for note...")
                elif pitchel.tag == 'alter':
                    if alter is None:
                        alter = int(pitchel.text)
                    else:
                        print("alter tag found twice for note...")
    if isRest:
        # rests are handled separately by getRestLength
        return None
    if duration is None:
        # this can happen for grace notes so actually just return none
        return None
    if step is None:
        print("Note with no step found")
    elif octave is None:
        print("Note with no octave found")
    if alter is None:
        alter = 0
    midiPitch = xmlPitchToMidiPitch(step, octave, alter)
    return (midiPitch, duration, isChord, tieType)
def iterateThroughMusic(e, handleNote, handleMeasure = None, handleRest = None, handlePart = None):
    """Traverse a MusicXML tree and fire callbacks for musical events.

    Maintains a running time cursor (in that part's division units)
    while walking each <part>/<measure>/<note> chain.

    Args:
        e: root element of a parsed MusicXML document.
        handleNote: called as handleNote(time, midiPitch, duration, partName)
            for every sounding note. Tie continuations (tie type 'stop')
            are skipped so a held note is reported only once, at its start.
        handleMeasure: optional, called once per child element of each part.
            NOTE(review): it fires for EVERY child, not only <measure>
            tags — confirm this is intended.
        handleRest: optional, called as handleRest(timePos, duration).
        handlePart: optional, called with each part's 'id' attribute.

    NOTE(review): this module is written in Python 2 style elsewhere;
    the divisions by `resolution` below rely on Python 2 integer
    division (harmless while resolution == 1).
    """
    #for legacy reasons
    resolution = 1
    for part in e:
        if part.tag == 'part':
            partName = part.attrib['id']
            if handlePart != None:
                handlePart(partName)
            #keep track of the current time
            timePos = 0
            measureNum = 0
            lastNoteTimePos = 0
            for measure in part:
                if handleMeasure != None:
                    handleMeasure()
                if measure.tag == 'measure':
                    #record latest time reached by any voice in this measure
                    latestTime = timePos
                    for note in measure:
                        if note.tag == 'note':
                            res = getRestLength(note)
                            if res == None:
                                #it's a note
                                res = getNoteInfo(note, measureNum)
                                if res == None:
                                    #this can happen for grace notes, for example,
                                    #just ignore
                                    continue
                                midiPitch, duration, isChord, tieType = res
                                if timePos % resolution == 0:
                                    #chord notes sound at the onset of the
                                    #preceding non-chord note
                                    if isChord:
                                        timePosForNote = lastNoteTimePos
                                    else:
                                        timePosForNote = timePos
                                    #tie 'stop' = continuation of an already
                                    #reported note
                                    if tieType != 'stop':
                                        handleNote(timePosForNote / resolution, midiPitch, (duration - 1) / resolution + 1, partName)
                                if not isChord:
                                    lastNoteTimePos = timePos
                                    timePos += duration
                            else:
                                #it's a rest
                                duration = res
                                if handleRest != None:
                                    handleRest(timePos, duration)
                                timePos += duration
                        elif note.tag == 'backup':
                            #<backup> rewinds the cursor (multi-voice parts)
                            duration = getBackupLength(note)
                            timePos -= duration
                        if timePos > latestTime:
                            latestTime = timePos
                    #a measure ends at the furthest point reached by any voice
                    timePos = latestTime
def getNodesUnderNodeWithName(node, name):
    """Depth-first collect every descendant of *node* whose tag is *name*.

    Matches are returned in document order; the search also descends
    into matching elements themselves.
    """
    found = []
    for child in node:
        if child.tag == name:
            found.append(child)
        found.extend(getNodesUnderNodeWithName(child, name))
    return found

def getNodeUnderNodeWithName(node, name):
    """Return the first descendant of *node* tagged *name*, or None."""
    matches = getNodesUnderNodeWithName(node, name)
    return matches[0] if matches else None
def getTempoForSong(tree):
    """Return the song tempo in BPM (rounded to int), or None.

    Some songs declare no tempo at all (None is returned); songs with
    several tempos yield the first one found in document order.
    """
    for soundNode in getNodesUnderNodeWithName(tree, 'sound'):
        tempoAttr = soundNode.attrib.get('tempo')
        if tempoAttr is not None:
            return int(round(float(tempoAttr)))
    return None
def getTranspositions(tree):
    """Map part id -> chromatic transposition in half steps.

    Parts without a <transpose>/<chromatic> declaration are simply
    omitted from the returned dict.
    """
    transposeByPart = {}
    for part in getNodesUnderNodeWithName(tree, 'part'):
        partId = part.attrib.get('id')
        if partId is None:
            continue
        transposeNode = getNodeUnderNodeWithName(part, 'transpose')
        if transposeNode is None:
            continue
        for child in transposeNode:
            if child.tag == 'chromatic':
                transposeByPart[partId] = int(child.text)
                break
    return transposeByPart
#we'll put this in its own routine, basically, the problem is,
#suppose a beat can be divided into div1 divisions and div2
#divisions. Suppose num specifies a point in time in divisions
#along the first scale. Can it be translated to a point in
#time in units of the second scale?
def translateToDifferentDivScale(num, divs1, divs2):
    """Translate *num* time units from a divs1-per-beat to a divs2-per-beat scale.

    Used to convert note times from "divs" (XML time units) to "slices"
    (statematrix time units). The translated value num * divs2 / divs1
    must be an exact integer; otherwise the point in time has no
    representation on the target scale and None is returned (callers
    drop such notes).

    Bug fix: num * divs2 / divs1 is integral iff num is divisible by
    divs1 // gcd(divs1, divs2); the original tested divs2 // gcd, which
    wrongly accepted times that do not land on a target-scale boundary.
    """
    # math.gcd: fractions.gcd was removed in Python 3.9.
    theGcd = math.gcd(divs1, divs2)
    if num % (divs1 // theGcd) != 0:
        # no exact representation on the target scale
        return None
    # divisibility is guaranteed above, so // is exact
    return num * divs2 // divs1
#parses XML, delivering events to the callback
#that indicate note locations/durations in
#slices. This can be used as a basis for parsing
#XML into various specific data structures
#also, this function returns a number indicating
#the number of slices that are actually a pickup
def parseXMLToSomething(xmltree, noteCreationCallback):
#examine tree for any transpositions
transpositions = getTranspositions(xmltree)
#examine tree for tempo
tempo = getTempoForSong(xmltree)
if tempo == None:
raise ValueError("can't produce state matrix for this XML, as there is no tempo")
#also, check music to see if there's a pickup.
#To do this, we look at the first two measures,
#if the lengths are different (as can be determined
#by looking at the notes and rests) then we have a
#nonzero pickup, which is the length of the first measure
class PickupLengthHandler:
def __init__(self):
self.measureNum = 0
self.latestTimeSeen = 0
self.measureLengths = [0, 0]
def __handleSomething(self, time, duration):
if self.measureNum == 1 or self.measureNum == 2:
index = self.measureNum - 1
if time + duration > self.measureLengths[index]:
self.measureLengths[index] = time + duration
def __call__(self, time, pitch, duration, part):
self.__handleSomething(time, duration)
def handleMeasure(self):
self.measureNum += 1
def handleRest(self, timePos, duration):
self.__handleSomething(timePos, duration)
def handlePart(self, partName):
self.partName = partName
def getPickupDivisions(self):
if self.measureLengths[0] == self.measureLengths[1]:
return 0
else:
return self.measureLengths[0]
    # NOTE(review): this code runs inside an enclosing function whose header
    # is above this view; xmltree, tempo, transpositions and
    # noteCreationCallback come from that scope -- confirm against caller.
    plm = PickupLengthHandler()
    iterateThroughMusic(xmltree, plm, plm.handleMeasure, plm.handleRest, plm.handlePart)
    pickupDivisions = plm.getPickupDivisions()
    pickupDivisionsPart = plm.partName
    #This is a constant, but actually it should be an input parameter. Anyways,
    #given the tempo, the secondsPerSlice, and the divisions per beat, we should
    #be able to figure out how divisions in the input correspond to slices in the
    #output
    secondsPerSlice = 0.125
    beatsPerMinute = float(tempo)
    beatsPerSecond = beatsPerMinute / 60
    #e = xml.etree.ElementTree.parse(xmlfile).getroot()
    e = xmltree
    #returns hashmap, part to divisions number
    divisions = getDivisions(e)
    #compute lcm of divisions over various parts, this
    #will be the divisions we use
    divisionsLCM = None
    for k in divisions.keys():
        thisDiv = divisions[k]
        if divisionsLCM == None:
            divisionsLCM = thisDiv
        else:
            # lcm(a, b) = a * b / gcd(a, b); fractions.gcd is Python 2
            # (moved to math.gcd in Python 3).
            divisionsLCM = (thisDiv * divisionsLCM)/fractions.gcd(thisDiv, divisionsLCM)
    #use divisions now to translate the pickup divisions for the given part, not all
    #parts use the same division scale, so use the LCM scale
    pickupDivisions *= (divisionsLCM/divisions[pickupDivisionsPart])
    divisionsPerBeat = divisionsLCM
    #this will be an exact floating point number
    #print "secondsPerSlice: {}".format(secondsPerSlice)
    #print "beatsPerSecond: {}".format(beatsPerSecond)
    slicesPerBeat = 1 / (beatsPerSecond * secondsPerSlice)
    #we require that the number of slices for a beat be an integer which
    #is a power of two. To do this, we'll take the log base 2, round
    #to the nearest int, then compute inverse log
    #print "SlicesPerBeat (real): {}".format(slicesPerBeat)
    slicesPerBeat = int(2**(int(round(math.log(slicesPerBeat, 2)))))
    #print "SlicesPerBeat: {}".format(slicesPerBeat)
    #print "divisionsPerBeat: {}".format(divisionsPerBeat)
    #compute gcd of slices per beat and divisions per beat
    slicesDivisionsGcd = fractions.gcd(slicesPerBeat, divisionsPerBeat)
    #we require that for a note to be resolved to slices, it's time in
    #divisions must be divisible by this number
    divisionsDivisor = divisionsPerBeat / slicesDivisionsGcd
    #compute the size of the pickup in slices, this is information
    #that will be needed for neural net training
    pickupSlices = pickupDivisions * slicesPerBeat / divisionsPerBeat
    #print "Pickup Divs: {}".format(pickupDivisions)
    #print "Pickup Slices: {}".format(pickupSlices)
def handleNote_createStateMatrix(time, pitch, duration, part):
#if part == 'P2':
#print "Got note, pitch: {0}, duration: {1}, time: {2}".format(pitch, duration, time)
pitch
if part in transpositions.keys():
pitch += transpositions[part]
#Sometimes different parts have different
#numbers of divisions, scale so that the time/
#duration is in terms of the LCM divisions
if divisions[part] != divisionsLCM:
#print "LCM scaling happening"
scalingFactor = (divisionsLCM / divisions[part])
time *= scalingFactor
duration *= scalingFactor
#time and duration are in divisions, we need them in slices
if time % divisionsDivisor != 0:
#this note doesn't fall on a slice boundary so we just skip it
return
else:
time = time * slicesPerBeat / divisionsPerBeat
#print "duration before: {}".format(duration)
duration = duration * slicesPerBeat / divisionsPerBeat
#print "duration after: {}".format(duration)
if duration == 0:
duration = 1
noteCreationCallback(time, pitch, duration)
#ad hoc--if divisions are divisible by 3, then assume
#that the division is at the lowest level for the piece,
#we set the granularity to ignore this subdivision level
iterateThroughMusic(e, handleNote_createStateMatrix)
return pickupSlices
#wrapper that takes filename instead of tree
def parseXMLFileToSomething(xmlFile, noteCreationCallback):
    """Convenience wrapper around parseXMLToSomething that accepts a
    filename instead of an already-parsed element tree."""
    root = xml.etree.ElementTree.parse(xmlFile).getroot()
    return parseXMLToSomething(root, noteCreationCallback)
def stateMatrixForSong(tree):
    """Build a note state matrix for a parsed musicxml element tree.

    Returns (pickupSlices, stateMatrix) where stateMatrix[t][p] is a
    [sounding, articulated] pair for time-slice t and pitch index p
    (pitch shifted down by the module-level lowerBound constant).
    """
    stateMatrix = []
    def handleNoteCreation(time, pitch, duration):
        """Note callback invoked by parseXMLToSomething (slice units)."""
        #for state matrices, we shift pitch down
        #by lower bound constant
        pitch -= lowerBound
        #if necessary, extend state matrix so that the desired times
        #exist; last time needed is time + duration - 1.
        #Build each cell independently: "numPitches * [[0, 0]]" would
        #make every cell of a row alias the SAME inner list, which is a
        #mutation hazard.
        while len(stateMatrix) < time + duration:
            stateMatrix.append([[0, 0] for _ in range(numPitches)])
        #onset slice: note is sounding and articulated
        stateMatrix[time][pitch] = [1, 1]
        #sustain slices: sounding but not re-articulated; do not clobber
        #an onset written by an overlapping note
        for i in range(time + 1, time + duration):
            if stateMatrix[i][pitch] == [0, 0]:
                stateMatrix[i][pitch] = [1, 0]
    pickupSlices = parseXMLToSomething(tree, handleNoteCreation)
    return (pickupSlices, stateMatrix)
def createStateMatrices(basedir = 'musicxml', minslices = 0):
    # Build state matrices for every .xml file under basedir.
    # Returns a dict: filename -> (pickupSlices, stateMatrix).
    # Files without tempo information, or shorter than minslices
    # slices, are skipped.
    stateMatrices = {}
    for theFile in os.listdir(os.getcwd() + '/' + basedir):
        # Only process files with an .xml extension.
        if not theFile.split('.')[-1] == 'xml':
            continue
        #parse xml file into document tree
        print basedir + '/' + theFile
        tree = xml.etree.ElementTree.parse(basedir + '/' + theFile).getroot()
        if getTempoForSong(tree) == None:
            print "File {} has no tempo!!!".format(theFile)
        else:
            sm = stateMatrixForSong(tree)
            songMatrix = sm[1]
            # Drop songs that are too short to be useful for training.
            if len(songMatrix) < minslices:
                print "File {} omitted, it is too short.".format(theFile)
            else:
                stateMatrices[theFile] = sm
    return stateMatrices
#NOTE: INTERFACE CHANGED--now returns 0 on success,
#1 on failure, reason for failure is that there is
#actually no tempo information in the xml file, so
#we don't know how to convert to midi
def midiForXML(xmlFile, midiDestFile):
    """Convert a musicxml file to a midi file written at midiDestFile.

    Returns 0 on success, 1 on failure.  Failure means the xml file
    carries no tempo information, so we don't know how to convert to
    midi (we no longer assume a default tempo, since real tempos can
    differ by an order of magnitude).
    """
    #parse xml file into document tree
    tree = xml.etree.ElementTree.parse(xmlFile).getroot()
    tempo = getTempoForSong(tree)
    if tempo is None:
        return 1
    #BUGFIX: stateMatrixForSong takes only the tree (the old second
    #argument no longer exists in its signature); index 1 is the matrix
    stateMatrix = stateMatrixForSong(tree)[1]
    midi_to_statematrix.noteStateMatrixToMidi(stateMatrix, name=midiDestFile)
    return 0
#NO LONGER USED!!!!
def createStateMatrices_old():
    # DEPRECATED: superseded by createStateMatrices(); kept for reference.
    # Reads musicxml/catalog.txt, a small line-oriented config format:
    #   file <name>            start a new song entry
    #   transpose <part> <n>   shift a part by n semitones
    #   slow <n> / speed <n>   tempo adjustments
    #   start-time <t>         offset into the song
    #   begincomment/endcomment  block comments; '#' line comments
    basedir = "musicxml/"
    f = open(basedir + 'catalog.txt', "r")
    lines = f.readlines()
    f.close()
    stateMatrices = {}
    #function that returns the default
    #value of a state matrix
    def defaultValFactory():
        return [0, 0]
    inBlockComment = False
    # Consume catalog lines destructively, one at a time.
    while lines:
        line = lines[0]
        del lines[0]
        if len(line) > 0 and line[0] == '#':
            continue
        toks = line.split()
        if len(toks) == 0:
            continue
        if inBlockComment:
            if toks[0] == 'endcomment':
                inBlockComment = False
            continue
        if toks[0] == 'begincomment':
            inBlockComment = True
            continue
        # Anything other than a 'file <name>' directive is skipped here.
        if len(toks) == 2 and toks[0] == 'file':
            pass
        else:
            continue
        origFilename = toks[1]
        mxlfile = basedir + origFilename
        print mxlfile
        # Per-song options gathered until the next 'file' directive.
        transpositions = {}
        slow = None
        speed = None
        startTime = 0
        while lines and len(lines[0].split()) != 0 and lines[0].split()[0] != 'file':
            line = lines[0]
            del lines[0]
            toks = line.split()
            if toks[0] == 'transpose':
                if not len(toks) == 3:
                    continue
                transpositions[toks[1]] = int(toks[2])
            elif toks[0] == 'slow':
                if not len(toks) == 2:
                    continue
                slow = int(toks[1])
            elif toks[0] == 'speed':
                if not len(toks) == 2:
                    continue
                speed = int(toks[1])
            elif toks[0] == 'start-time':
                if not len(toks) == 2:
                    continue
                startTime = float(toks[1])
        #parse xml file into document tree
        # NOTE(review): transpositions/slow/speed/startTime are collected
        # but not passed to stateMatrixForSong here -- possibly why this
        # function was retired.
        tree = xml.etree.ElementTree.parse(mxlfile).getroot()
        if getTempoForSong(tree) == None:
            print "File {} has no tempo!!!".format(mxlfile)
        else:
            stateMatrices[origFilename] = stateMatrixForSong(tree)
    return stateMatrices
if __name__ == "__main__":
    # Smoke test: build matrices for every catalogued song and render
    # each one back out as midi under ./midi_output_test/.
    stateMatrices = createStateMatrices()
    print "{0} songs total.".format(len(stateMatrices))
    #print "Pwd: " + os.getcwd()
    for k in stateMatrices.keys():
        midi_to_statematrix.noteStateMatrixToMidi(stateMatrices[k][1], name='./midi_output_test/{}'.format(k))
| |
# Copyright (c) 2012 Roberto Alsina y otros.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
import codecs
import json
import os
from nikola.plugin_categories import Task
from nikola import utils
class RenderTags(Task):
    """Render the tag pages and feeds."""

    # doit task basename under which all generated tasks are grouped.
    name = "render_tags"

    def gen_tasks(self):
        """Render the tag pages and feeds.

        Generator of doit task dicts: the all-tags index page(s), then
        per-tag/per-language RSS feeds and HTML pages, and finally the
        tag-cloud JSON data file.
        """
        # Snapshot of site config consumed by the helper methods below;
        # also used as an uptodate dependency for the generated tasks.
        kw = {
            "translations": self.site.config["TRANSLATIONS"],
            "blog_title": self.site.config["BLOG_TITLE"],
            "site_url": self.site.config["SITE_URL"],
            "blog_description": self.site.config["BLOG_DESCRIPTION"],
            "messages": self.site.MESSAGES,
            "output_folder": self.site.config['OUTPUT_FOLDER'],
            "filters": self.site.config['FILTERS'],
            "tag_pages_are_indexes": self.site.config['TAG_PAGES_ARE_INDEXES'],
            "index_display_post_count":
            self.site.config['INDEX_DISPLAY_POST_COUNT'],
            "index_teasers": self.site.config['INDEX_TEASERS'],
            "rss_teasers": self.site.config["RSS_TEASERS"],
            "hide_untranslated_posts": self.site.config['HIDE_UNTRANSLATED_POSTS'],
        }
        self.site.scan_posts()
        yield self.list_tags_page(kw)
        # With no tagged posts, emit an empty placeholder task so the
        # basename still exists, then stop.
        if not self.site.posts_per_tag:
            yield {'basename': str(self.name), 'actions': []}
            return
        for tag, posts in list(self.site.posts_per_tag.items()):
            post_list = [self.site.global_data[post] for post in posts]
            # Newest posts first.
            post_list.sort(key=lambda a: a.date)
            post_list.reverse()
            for lang in kw["translations"]:
                if kw["hide_untranslated_posts"]:
                    filtered_posts = [x for x in post_list if x.is_translation_available(lang)]
                else:
                    filtered_posts = post_list
                rss_post_list = [p.post_name for p in filtered_posts]
                yield self.tag_rss(tag, lang, rss_post_list, kw)
                # Render HTML
                if kw['tag_pages_are_indexes']:
                    yield self.tag_page_as_index(tag, lang, filtered_posts, kw)
                else:
                    yield self.tag_page_as_list(tag, lang, filtered_posts, kw)
        # Tag cloud json file: tag -> [post count, tag page link].
        tag_cloud_data = {}
        for tag, posts in self.site.posts_per_tag.items():
            tag_cloud_data[tag] = [len(posts), self.site.link(
                'tag', tag, self.site.config['DEFAULT_LANG'])]
        output_name = os.path.join(kw['output_folder'],
                                   'assets', 'js', 'tag_cloud_data.json')

        def write_tag_data(data):
            # Task action: dump the tag cloud data as JSON.
            try:
                os.makedirs(os.path.dirname(output_name))
            # NOTE(review): bare except also swallows KeyboardInterrupt;
            # presumably meant to ignore "directory exists" (OSError) only.
            except:
                pass
            with codecs.open(output_name, 'wb+', 'utf8') as fd:
                fd.write(json.dumps(data))
        task = {
            'basename': str(self.name),
            'name': str(output_name)
        }
        task['uptodate'] = [utils.config_changed(tag_cloud_data)]
        task['targets'] = [output_name]
        task['actions'] = [(write_tag_data, [tag_cloud_data])]
        task['clean'] = True
        yield task

    def list_tags_page(self, kw):
        """a global "all your tags" page for each language"""
        tags = list(self.site.posts_per_tag.keys())
        # We want our tags to be sorted case insensitive
        tags.sort(key=lambda a: a.lower())
        template_name = "tags.tmpl"
        kw['tags'] = tags
        for lang in kw["translations"]:
            output_name = os.path.join(
                kw['output_folder'], self.site.path('tag_index', None, lang))
            output_name = output_name
            context = {}
            context["title"] = kw["messages"][lang]["Tags"]
            # (tag, link-to-tag-page) pairs consumed by the template.
            context["items"] = [(tag, self.site.link("tag", tag, lang)) for tag
                                in tags]
            context["permalink"] = self.site.link("tag_index", None, lang)
            task = self.site.generic_post_list_renderer(
                lang,
                [],
                output_name,
                template_name,
                kw['filters'],
                context,
            )
            # Rebuild when either the renderer's own config or kw changes.
            task_cfg = {1: task['uptodate'][0].config, 2: kw}
            task['uptodate'] = [utils.config_changed(task_cfg)]
            yield task

    def tag_page_as_index(self, tag, lang, post_list, kw):
        """render a sort of index page collection using only this
        tag's posts."""

        def page_name(tagname, i, lang):
            """Given tag, n, returns a page name."""
            name = self.site.path("tag", tag, lang)
            # Page 0 keeps the plain name; later pages get a -N suffix.
            if i:
                name = name.replace('.html', '-{0}.html'.format(i))
            return name

        # FIXME: deduplicate this with render_indexes
        template_name = "index.tmpl"
        # Split in smaller lists
        lists = []
        while post_list:
            lists.append(post_list[:kw["index_display_post_count"]])
            post_list = post_list[kw["index_display_post_count"]:]
        num_pages = len(lists)
        for i, post_list in enumerate(lists):
            context = {}
            # On a tag page, the feeds include the tag's feeds
            rss_link = ("""<link rel="alternate" type="application/rss+xml" """
                        """type="application/rss+xml" title="RSS for tag """
                        """{0} ({1})" href="{2}">""".format(
                            tag, lang, self.site.link("tag_rss", tag, lang)))
            context['rss_link'] = rss_link
            output_name = os.path.join(kw['output_folder'],
                                       page_name(tag, i, lang))
            context["title"] = kw["messages"][lang][
                "Posts about %s"] % tag
            context["prevlink"] = None
            context["nextlink"] = None
            context['index_teasers'] = kw['index_teasers']
            # prev/next navigation; page 1's prev is the unsuffixed page 0.
            if i > 1:
                context["prevlink"] = os.path.basename(
                    page_name(tag, i - 1, lang))
            if i == 1:
                context["prevlink"] = os.path.basename(
                    page_name(tag, 0, lang))
            if i < num_pages - 1:
                context["nextlink"] = os.path.basename(
                    page_name(tag, i + 1, lang))
            context["permalink"] = self.site.link("tag", tag, lang)
            context["tag"] = tag
            task = self.site.generic_post_list_renderer(
                lang,
                post_list,
                output_name,
                template_name,
                kw['filters'],
                context,
            )
            task_cfg = {1: task['uptodate'][0].config, 2: kw}
            task['uptodate'] = [utils.config_changed(task_cfg)]
            task['basename'] = str(self.name)
            yield task

    def tag_page_as_list(self, tag, lang, post_list, kw):
        """We render a single flat link list with this tag's posts"""
        template_name = "tag.tmpl"
        output_name = os.path.join(kw['output_folder'], self.site.path(
            "tag", tag, lang))
        context = {}
        context["lang"] = lang
        context["title"] = kw["messages"][lang]["Posts about %s"] % tag
        context["posts"] = post_list
        context["permalink"] = self.site.link("tag", tag, lang)
        context["tag"] = tag
        task = self.site.generic_post_list_renderer(
            lang,
            post_list,
            output_name,
            template_name,
            kw['filters'],
            context,
        )
        task_cfg = {1: task['uptodate'][0].config, 2: kw}
        task['uptodate'] = [utils.config_changed(task_cfg)]
        task['basename'] = str(self.name)
        yield task

    def tag_rss(self, tag, lang, posts, kw):
        """RSS for a single tag / language"""
        #Render RSS
        output_name = os.path.join(kw['output_folder'],
                                   self.site.path("tag_rss", tag, lang))
        deps = []
        # Only posts flagged for feed inclusion, newest first.
        post_list = [self.site.global_data[post] for post in posts if
                     self.site.global_data[post].use_in_feeds]
        post_list.sort(key=lambda a: a.date)
        post_list.reverse()
        for post in post_list:
            deps += post.deps(lang)
        return {
            'basename': str(self.name),
            'name': output_name,
            'file_dep': deps,
            'targets': [output_name],
            'actions': [(utils.generic_rss_renderer,
                        (lang, "{0} ({1})".format(kw["blog_title"], tag),
                         kw["site_url"], kw["blog_description"], post_list,
                         output_name, kw["rss_teasers"]))],
            'clean': True,
            'uptodate': [utils.config_changed(kw)],
            'task_dep': ['render_posts'],
        }
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import uuid
import memcache
import six
from keystone.common import utils
from keystone import config
from keystone import exception
from keystone.openstack.common import jsonutils
from keystone.openstack.common import timeutils
from keystone import tests
from keystone.tests import test_backend
from keystone.tests import test_utils
from keystone import token
from keystone.token.backends import memcache as token_memcache
CONF = config.CONF
class MemcacheClient(object):
    """Replicates a tiny subset of memcached client interface."""

    def __init__(self, *args, **kwargs):
        """Ignores the passed in args."""
        self.cache = {}
        self.reject_cas = False

    def add(self, key, value):
        # Only store when nothing valid is currently cached under key.
        if self.get(key):
            return False
        return self.set(key, value)

    def append(self, key, value):
        current = self.get(key)
        if not current:
            return False
        self.set(key, current + value)
        return True

    def check_key(self, key):
        # The real client only accepts byte-string keys.
        if not isinstance(key, str):
            raise memcache.Client.MemcachedStringEncodingError()

    def gets(self, key):
        # No real 'cas' support here, so this is just a plain get().
        return self.get(key)

    def get(self, key):
        """Retrieves the value for a key or None."""
        self.check_key(key)
        now = utils.unixtime(timeutils.utcnow())
        entry = self.cache.get(key)
        if entry and (entry[1] == 0 or entry[1] > now):
            # Hand back a deep copy so callers cannot mutate the cached
            # back-end data through the returned reference -- closer to
            # how real memcache behaves and easier to test against.
            return copy.deepcopy(entry[0])

    def set(self, key, value, time=0):
        """Sets the value for a key."""
        self.check_key(key)
        # Store a deep copy so later mutation of the caller's object
        # cannot change the cached back-end data; only explicit
        # set/delete/append/etc should do that.
        self.cache[key] = (copy.deepcopy(value), time)
        return True

    def cas(self, key, value, time=0, min_compress_len=0):
        # Behaves like set() unless the test asked us to reject cas.
        if self.reject_cas:
            return False
        return self.set(key, value, time=time)

    def reset_cas(self):
        # Stub for the real memcache client's reset_cas function.
        pass

    def delete(self, key):
        self.check_key(key)
        # python-memcached always returns the same value whether or not
        # the key existed, so a missing key is not an error.
        self.cache.pop(key, None)
class MemcacheToken(tests.TestCase, test_backend.TokenTests):
    # Runs the shared TokenTests suite against the memcache token
    # backend, using the in-process MemcacheClient fake above.

    def setUp(self):
        super(MemcacheToken, self).setUp()
        # Use the memcache backend for the token driver.
        self.opt_in_group('token',
                          driver='keystone.token.backends.memcache.Token')
        self.load_backends()
        # Override the memcache client with the "dummy" client.
        fake_client = MemcacheClient()
        self.token_man = token.Manager()
        self.token_man.driver = token_memcache.Token(client=fake_client)
        self.token_api = self.token_man

    def test_create_unicode_token_id(self):
        # Unicode token ids must round-trip through create/get.
        token_id = six.text_type(self._create_token_id())
        data = {'id': token_id, 'a': 'b',
                'user': {'id': 'testuserid'}}
        self.token_api.create_token(token_id, data)
        self.token_api.get_token(token_id)

    def test_create_unicode_user_id(self):
        # Unicode user ids must be accepted by the backend as well.
        token_id = self._create_token_id()
        user_id = six.text_type(uuid.uuid4().hex)
        data = {'id': token_id, 'a': 'b',
                'user': {'id': user_id}}
        self.token_api.create_token(token_id, data)
        self.token_api.get_token(token_id)

    def test_list_tokens_unicode_user_id(self):
        # Listing tokens for a unicode user id must not blow up.
        user_id = six.text_type(uuid.uuid4().hex)
        self.token_api.list_tokens(user_id)

    def test_flush_expired_token(self):
        # The memcache backend does not support flushing expired tokens.
        self.assertRaises(exception.NotImplemented,
                          self.token_api.flush_expired_tokens)

    def test_cleanup_user_index_on_create(self):
        # Creating a new token should prune expired entries from the
        # per-user token index so it doesn't grow without bound.
        valid_token_id = uuid.uuid4().hex
        second_valid_token_id = uuid.uuid4().hex
        expired_token_id = uuid.uuid4().hex
        user_id = six.text_type(uuid.uuid4().hex)
        expire_delta = datetime.timedelta(seconds=CONF.token.expiration)
        valid_data = {'id': valid_token_id, 'a': 'b',
                      'user': {'id': user_id}}
        second_valid_data = {'id': second_valid_token_id, 'a': 'b',
                             'user': {'id': user_id}}
        expired_data = {'id': expired_token_id, 'a': 'b',
                        'user': {'id': user_id}}
        self.token_api.create_token(valid_token_id, valid_data)
        self.token_api.create_token(expired_token_id, expired_data)
        # NOTE(morganfainberg): Directly access the data cache since we need to
        # get expired tokens as well as valid tokens. token_api._list_tokens()
        # will not return any expired tokens in the list.
        user_key = self.token_api.driver._prefix_user_id(user_id)
        user_record = self.token_api.driver.client.get(user_key)
        user_token_list = jsonutils.loads('[%s]' % user_record)
        self.assertEqual(len(user_token_list), 2)
        # Force the second token to look expired by rewriting it
        # directly in the fake client.
        expired_token_ptk = self.token_api.driver._prefix_token_id(
            expired_token_id)
        expired_token = self.token_api.driver.client.get(expired_token_ptk)
        expired_token['expires'] = (timeutils.utcnow() - expire_delta)
        self.token_api.driver.client.set(expired_token_ptk, expired_token)
        # The next create should drop the expired entry, keeping the
        # index at two tokens (the two valid ones).
        self.token_api.create_token(second_valid_token_id, second_valid_data)
        user_record = self.token_api.driver.client.get(user_key)
        user_token_list = jsonutils.loads('[%s]' % user_record)
        self.assertEqual(len(user_token_list), 2)

    def test_cas_failure(self):
        # A rejected compare-and-set must surface as UnexpectedError.
        self.token_api.driver.client.reject_cas = True
        token_id = uuid.uuid4().hex
        user_id = six.text_type(uuid.uuid4().hex)
        user_key = self.token_api.driver._prefix_user_id(user_id)
        token_data = jsonutils.dumps(token_id)
        self.assertRaises(
            exception.UnexpectedError,
            self.token_api.driver._update_user_list_with_cas,
            user_key, token_data)

    def test_token_expire_timezone(self):
        # Token expiry must behave the same regardless of the local TZ.

        @test_utils.timezone
        def _create_token(expire_time):
            token_id = uuid.uuid4().hex
            user_id = six.text_type(uuid.uuid4().hex)
            data = {'id': token_id, 'a': 'b', 'user': {'id': user_id},
                    'expires': expire_time
                    }
            self.token_api.create_token(token_id, data)
            return data

        for d in ['+0', '-11', '-8', '-5', '+5', '+8', '+14']:
            test_utils.TZ = 'UTC' + d
            # A token expiring one minute from now must be retrievable.
            expire_time = timeutils.utcnow() + \
                datetime.timedelta(minutes=1)
            data_in = _create_token(expire_time)
            data_get = None
            data_get = self.token_api.get_token(data_in['id'])
            self.assertIsNotNone(data_get, "TZ=%s" % test_utils.TZ)
            self.assertEqual(data_in['id'], data_get['id'],
                             "TZ=%s" % test_utils.TZ)
            # A token that expired one minute ago must not be found.
            expire_time_expired = timeutils.utcnow() + \
                datetime.timedelta(minutes=-1)
            data_in = _create_token(expire_time_expired)
            self.assertRaises(exception.TokenNotFound,
                              self.token_api.get_token, data_in['id'])
class MemcacheTokenCacheInvalidation(tests.TestCase,
                                     test_backend.TokenCacheInvalidation):
    # Runs the shared TokenCacheInvalidation suite against the memcache
    # token backend, again substituting the fake in-memory client.

    def setUp(self):
        super(MemcacheTokenCacheInvalidation, self).setUp()
        CONF.token.driver = 'keystone.token.backends.memcache.Token'
        self.load_backends()
        # Swap in the dummy client and wire the manager into both the
        # token API and the token provider so they share one backend.
        fake_client = MemcacheClient()
        self.token_man = token.Manager()
        self.token_man.driver = token_memcache.Token(client=fake_client)
        self.token_api = self.token_man
        self.token_provider_api.driver.token_api = self.token_api
        self._create_test_data()
| |
#! /usr/bin/python
import datetime
import logging
import os
import os.path
import media_common
import pyexiv2
import sqlite3
import time
import unittest
from mock import *
class TestRepository(unittest.TestCase):
    """Unit tests for media_common.Repository with mocked sqlite3/os."""

    def setUp(self):
        self.rep = media_common.Repository()

    @patch('sqlite3.dbapi2.connect')
    @patch('os.access')
    @patch('media_common.Repository._tree_setup')
    def test_open(self, tree_setup, access, connect):
        """Opening an existing database must not re-create the schema."""
        access.return_value = True
        conn_mock = Mock()
        cur_mock = Mock()
        # BUGFIX: assign the mock itself, not the result of calling it
        # (the original had cur_mock(), inconsistent with test_create).
        conn_mock.cursor.return_value = cur_mock
        connect.return_value = conn_mock
        self.rep.open('/tmp/bar')
        db_name = '/tmp/bar/media.db'
        if os.name == 'nt':
            db_name = '/tmp/bar\\media.db'
        access.assert_called_with(db_name, ANY)
        connect.assert_called_with(db_name)
        self.assertFalse(conn_mock.cursor.called,
                         'expect cursor not needed for existing database')
        self.assertTrue(tree_setup.called,
                        'expect tree_setup always called')

    @patch('sqlite3.dbapi2.connect')
    @patch('os.access')
    @patch('media_common.Repository._tree_setup')
    def test_create(self, tree_setup, access, connect):
        """A missing database file must trigger schema creation."""
        access.return_value = False
        conn_mock = Mock(name="connection_mock", spec_set=['cursor'])
        cur_mock = Mock(name="cursor_mock", spec_set=['execute'])
        conn_mock.cursor.return_value = cur_mock
        connect.return_value = conn_mock
        self.rep.open('/tmp/bar')
        db_name = '/tmp/bar/media.db'
        if os.name == 'nt':
            db_name = '/tmp/bar\\media.db'
        access.assert_called_with(db_name, ANY)
        connect.assert_called_with(db_name)
        self.assertTrue(conn_mock.cursor.called,
                        'expect cursor needed for new database')
        self.assertTrue(cur_mock.execute.called,
                        'expect insert table called for new database')
        self.assertTrue(tree_setup.called,
                        'expect tree_setup always called')

    @patch('os.mkdir')
    def test_tree_setup(self, mkdir):
        """_tree_setup should create exactly two directories."""
        media_common.Repository()._tree_setup('/tmp/foo')
        self.assertEquals(2, mkdir.call_count)

    def _execute_tester(self, method):
        """Shared check for query methods: the repository's cursor is
        returned and its execute() was invoked."""
        rep = media_common.Repository()
        rep.con = MagicMock()
        rep.con.cursor.return_value.execute.return_value = [1, 4]
        self.assertEquals(rep.con.cursor.return_value, method(rep))
        self.assertTrue(rep.con.cursor.return_value.execute.called)

    def test_iter_all_photos(self):
        # Reuse the shared helper instead of duplicating its body.
        self._execute_tester(lambda rep: rep.iter_all_photos())

    def test_remove_photos(self):
        """remove_photos passes the id list straight to execute()."""
        rep = media_common.Repository()
        rep.con = MagicMock()
        execute = rep.con.cursor.return_value.execute
        rep.remove_photos([1, 4])
        execute.assert_called_with(ANY, [1, 4])

    def test_remove(self):
        """remove() deletes by the photo's md5 hash."""
        rep = media_common.Repository()
        rep.con = MagicMock()
        execute = rep.con.cursor.return_value.execute
        photo = MagicMock()
        photo.md5 = 42
        rep.remove(photo)
        execute.assert_called_with(ANY, [42])

    def test_close(self):
        """close() commits, closes, and drops the connection."""
        rep = media_common.Repository()
        connection = MagicMock()
        rep.con = connection
        rep.close()
        self.assertEquals(None, rep.con, 'expect connection removed')
        # commit must happen before close.
        self.assertEquals(call.commit(), connection.mock_calls[0])
        self.assertEquals(call.close(), connection.mock_calls[1])

    def test_add_or_update(self):
        """add_or_update returns the inserted row id and commits."""
        rep = media_common.Repository()
        photo = Mock()
        rep.con = Mock()
        rep.con.cursor.return_value.lastrowid = 42
        self.assertEquals(42, rep.add_or_update(photo))
        self.assertTrue(rep.con.cursor.called)
        self.assertTrue(rep.con.cursor.return_value.execute.called)
        self.assertTrue(rep.con.commit.called)

    def test_lookup_hash(self):
        """lookup_hash returns the (id, path) row for a known hash."""
        rep = media_common.Repository()
        rep.con = MagicMock()
        fetch_mock = rep.con.cursor.return_value.execute.return_value.fetchone
        fetch_mock.return_value = (42, '/tmp/foo')
        self.assertEquals('/tmp/foo', rep.lookup_hash('bar')[1])

    def test_lookup_hash_not_found(self):
        """lookup_hash returns None when the hash is absent."""
        rep = media_common.Repository()
        rep.con = MagicMock()
        fetch_mock = rep.con.cursor.return_value.execute.return_value.fetchone
        fetch_mock.return_value = None
        self.assertEquals(None, rep.lookup_hash('bar'))

    def test_get_db_name(self):
        self.assertEquals('media.db', media_common.Repository()._get_db_name())

    def test_init_db(self):
        """_init_db issues schema statements through the cursor."""
        rep = media_common.Repository()
        cur = Mock()
        rep._init_db(cur)
        self.assertTrue(cur.execute.called)
class TestCollectionRepository(unittest.TestCase):
    """Tests for media_common.CollectionRepository."""

    def test_add_or_update(self):
        # The inserted row id should be returned and the change committed.
        repo = media_common.CollectionRepository()
        repo.con = Mock()
        repo.con.cursor.return_value.lastrowid = 42
        photo = Mock()
        self.assertEquals(42, repo.add_or_update(photo))
        self.assertTrue(repo.con.cursor.called)
        self.assertTrue(repo.con.cursor.return_value.execute.called)
        self.assertTrue(repo.con.commit.called)

    def test_get_db_name(self):
        # Collection repositories use their own database filename.
        self.assertEquals('local_media.db',
                          media_common.CollectionRepository()._get_db_name())

    def test_init_db(self):
        # Schema creation must go through the supplied cursor.
        repo = media_common.CollectionRepository()
        cursor = Mock()
        repo._init_db(cursor)
        self.assertTrue(cursor.execute.called)
class TestPhoto(unittest.TestCase):
    # Tests for media_common.Photo metadata loading, with pyexiv2 and
    # the filesystem mocked out.

    def setUp(self):
        self.photo = media_common.Photo('/tmp/foo.jpg')
        # Fixed reference timestamp shared by the timestamp tests.
        self.time_struct = time.strptime('2012-08-19 15:14:04', '%Y-%m-%d %H:%M:%S')
        self.timestamp = time.mktime(self.time_struct)
        self.datetime_obj = datetime.datetime.fromtimestamp(self.timestamp)
        pass

    @patch('pyexiv2.ImageMetadata')
    def test_get_path_parts(self, image):
        # With metadata already read, path parts come from the timestamp.
        self.photo.metadata_read = True
        self.photo.timestamp = self.timestamp
        self.assertEquals((2012, 8, 'foo.jpg'), self.photo.get_path_parts())

    @patch('pyexiv2.ImageMetadata')
    def test_get_path_parts_uninitialized(self, image):
        # Without metadata, get_path_parts must trigger load_metadata.
        self.photo.metadata_read = False
        with patch.object(self.photo, 'load_metadata') as ts:
            self.photo.timestamp = self.timestamp
            self.assertEquals((2012, 8, 'foo.jpg'), self.photo.get_path_parts())
            self.assertTrue(self.photo.load_metadata.called)

    @patch('pyexiv2.ImageMetadata')
    def test_load_metadata(self, image):
        # Patch every loader helper; load_metadata should call them all.
        instance = image.return_value
        with patch.object(self.photo, '_load_exif_timestamp') as ts:
            with patch.object(self.photo, '_load_camera_make') as cmake:
                with patch.object(self.photo, '_load_camera_model') as cmodel:
                    with patch.object(self.photo, '_load_file_size') as fs:
                        with patch.object(self.photo,
                                          '_load_filesystem_timestamp') as fts:
                            with patch.object(self.photo, 'get_hash') as gh:
                                self.photo.load_metadata()
        self.assertTrue(ts.called)
        self.assertTrue(self.photo._load_camera_make.called)
        self.assertTrue(self.photo._load_camera_model.called)
        self.assertTrue(self.photo._load_file_size.called)

    def test_load_exif_timestamp(self):
        # EXIF DateTimeOriginal should become the photo's timestamp.
        m = MagicMock(spec_set=['__getitem__'])
        image_keys = ['Exif.Image.DateTimeOriginal']
        m.__getitem__.return_value.value = self.datetime_obj
        self.photo._load_exif_timestamp(m, image_keys)
        self.assertEquals(self.timestamp, self.photo.timestamp)

    @patch('os.path.getmtime')
    def test_load_filesystem_timestamp(self, getmtime):
        # Fallback: file mtime becomes the photo's timestamp.
        getmtime.return_value = self.timestamp
        self.photo._load_filesystem_timestamp()
        self.assertEquals(self.timestamp, self.photo.timestamp)

    def test_load_camera_make(self):
        m = MagicMock(spec_set=['__getitem__'])
        image_keys = ['Exif.Image.Make']
        m.__getitem__.return_value.value = 'Foo'
        self.photo._load_camera_make(m, image_keys)
        self.assertEquals('Foo', self.photo.camera_make)

    def test_load_camera_model(self):
        m = MagicMock(spec_set=['__getitem__'])
        image_keys = ['Exif.Image.Model']
        m.__getitem__.return_value.value = 'Bar'
        self.photo._load_camera_model(m, image_keys)
        self.assertEquals('Bar', self.photo.camera_model)
if os.name != 'nt':
    # The grp module is POSIX-only, so skip these tests on Windows.
    class TestUtilityFunctions(unittest.TestCase):
        @patch('grp.getgrnam')
        def test_get_group_id(self, getgrnam):
            # grp.getgrnam returns a struct whose third field is the gid.
            getgrnam.return_value = [None, None, 42]
            self.assertEquals(42, media_common.get_group_id('foo'))
            # A missing/None group name maps to gid -1.
            self.assertEquals(-1, media_common.get_group_id(None))

if __name__ == '__main__':
    unittest.main()
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#!/usr/bin/env python
# ===================================
# replica_basic_test.py
# ===================================
import inspect
import logging
import os
import pprint
import signal
import subprocess
import sys
import time
import traceback
from system_test_env import SystemTestEnv
sys.path.append(SystemTestEnv.SYSTEM_TEST_UTIL_DIR)
from setup_utils import SetupUtils
from replication_utils import ReplicationUtils
import system_test_utils
from testcase_env import TestcaseEnv
# product specific: Kafka
import kafka_system_test_utils
import metrics
class ReplicaBasicTest(ReplicationUtils, SetupUtils):
testModuleAbsPathName = os.path.realpath(__file__)
testSuiteAbsPathName = os.path.abspath(os.path.dirname(testModuleAbsPathName))
    def __init__(self, systemTestEnv):
        # SystemTestEnv - provides cluster level environment settings
        # such as entity_id, hostname, kafka_home, java_home which
        # are available in a list of dictionary named
        # "clusterEntityConfigDictList"
        self.systemTestEnv = systemTestEnv

        # NOTE(review): ReplicationUtils.__init__ appears to expect the
        # test-class instance as its argument, hence passing self here
        # -- confirm against replication_utils.
        super(ReplicaBasicTest, self).__init__(self)

        # dict to pass user-defined attributes to logger argument: "extra"
        # NOTE(review): 'd' is never used after this assignment in the
        # visible code -- possibly dead, or consumed by a logging helper.
        d = {'name_of_class': self.__class__.__name__}
    def signal_handler(self, signal, frame):
        # SIGINT handler: tear down all remote test processes before
        # exiting, so a Ctrl+c doesn't leave brokers/producers running.
        self.log_message("Interrupt detected - User pressed Ctrl+c")

        # perform the necessary cleanup here when user presses Ctrl+c and it may be product specific
        self.log_message("stopping all entities - please wait ...")
        kafka_system_test_utils.stop_all_remote_running_processes(self.systemTestEnv, self.testcaseEnv)
        sys.exit(1)
    def runTest(self):
        """
        Run every testcase directory found under this test suite, one by one.

        For each testcase: build a TestcaseEnv, honor skip/run lists, clean
        remote data dirs, generate config/log dirs and overridden property
        files, start zookeepers/brokers (and topics/consumers as configured),
        start the background producer, then bounce a broker (leader, follower
        or controller) or simulate a GC pause for `num_iteration` rounds.
        Afterwards it stops the producer, stops all entities, collects remote
        logs, validates data/checksums and draws metrics graphs/dashboards.
        Exceptions are logged and the next testcase is attempted; cleanup of
        remote processes runs in the `finally` clause.
        """
        # ======================================================================
        # get all testcase directories under this testsuite
        # ======================================================================
        testCasePathNameList = system_test_utils.get_dir_paths_with_prefix(
            self.testSuiteAbsPathName, SystemTestEnv.SYSTEM_TEST_CASE_PREFIX)
        testCasePathNameList.sort()
        replicationUtils = ReplicationUtils(self)
        # =============================================================
        # launch each testcase one by one: testcase_1, testcase_2, ...
        # =============================================================
        for testCasePathName in testCasePathNameList:
            skipThisTestCase = False
            try:
                # ======================================================================
                # A new instance of TestcaseEnv to keep track of this testcase's env vars
                # and initialize some env vars as testCasePathName is available now
                # ======================================================================
                self.testcaseEnv = TestcaseEnv(self.systemTestEnv, self)
                self.testcaseEnv.testSuiteBaseDir = self.testSuiteAbsPathName
                self.testcaseEnv.initWithKnownTestCasePathName(testCasePathName)
                self.testcaseEnv.testcaseArgumentsDict = self.testcaseEnv.testcaseNonEntityDataDict["testcase_args"]
                # ======================================================================
                # SKIP if this case is IN testcase_to_skip.json or NOT IN testcase_to_run.json
                # ======================================================================
                testcaseDirName = self.testcaseEnv.testcaseResultsDict["_test_case_name"]
                if self.systemTestEnv.printTestDescriptionsOnly:
                    self.testcaseEnv.printTestCaseDescription(testcaseDirName)
                    continue
                elif self.systemTestEnv.isTestCaseToSkip(self.__class__.__name__, testcaseDirName):
                    self.log_message("Skipping : " + testcaseDirName)
                    skipThisTestCase = True
                    continue
                else:
                    self.testcaseEnv.printTestCaseDescription(testcaseDirName)
                    system_test_utils.setup_remote_hosts_with_testcase_level_cluster_config(self.systemTestEnv, testCasePathName)
                # ============================================================================== #
                # ============================================================================== #
                #            Product Specific Testing Code Starts Here:                          #
                # ============================================================================== #
                # ============================================================================== #
                # get optional testcase arguments
                # (each key is optional in the testcase JSON -- the bare
                # excepts below deliberately keep the defaults when absent)
                logRetentionTest = "false"
                try:
                    logRetentionTest = self.testcaseEnv.testcaseArgumentsDict["log_retention_test"]
                except:
                    pass
                consumerMultiTopicsMode = "false"
                try:
                    consumerMultiTopicsMode = self.testcaseEnv.testcaseArgumentsDict["consumer_multi_topics_mode"]
                except:
                    pass
                autoCreateTopic = "false"
                try:
                    autoCreateTopic = self.testcaseEnv.testcaseArgumentsDict["auto_create_topic"]
                except:
                    pass
                # initialize self.testcaseEnv with user-defined environment variables (product specific)
                self.testcaseEnv.userDefinedEnvVarDict["zkConnectStr"] = ""
                self.testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"] = False
                self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"] = False
                self.testcaseEnv.userDefinedEnvVarDict["leaderElectionLatencyList"] = []
                # initialize signal handler
                signal.signal(signal.SIGINT, self.signal_handler)
                # TestcaseEnv.testcaseConfigsList initialized by reading testcase properties file:
                #   system_test/<suite_name>_testsuite/testcase_<n>/testcase_<n>_properties.json
                self.testcaseEnv.testcaseConfigsList = system_test_utils.get_json_list_data(
                    self.testcaseEnv.testcasePropJsonPathName)
                # clean up data directories specified in zookeeper.properties and kafka_server_<n>.properties
                kafka_system_test_utils.cleanup_data_at_remote_hosts(self.systemTestEnv, self.testcaseEnv)
                # create "LOCAL" log directories for metrics, dashboards for each entity under this testcase
                # for collecting logs from remote machines
                kafka_system_test_utils.generate_testcase_log_dirs(self.systemTestEnv, self.testcaseEnv)
                # TestcaseEnv - initialize producer & consumer config / log file pathnames
                kafka_system_test_utils.init_entity_props(self.systemTestEnv, self.testcaseEnv)
                # generate remote hosts log/config dirs if not exist
                kafka_system_test_utils.generate_testcase_log_dirs_in_remote_hosts(self.systemTestEnv, self.testcaseEnv)
                # generate properties files for zookeeper, kafka, producer, consumer:
                # 1. copy system_test/<suite_name>_testsuite/config/*.properties to
                #    system_test/<suite_name>_testsuite/testcase_<n>/config/
                # 2. update all properties files in system_test/<suite_name>_testsuite/testcase_<n>/config
                #    by overriding the settings specified in:
                #    system_test/<suite_name>_testsuite/testcase_<n>/testcase_<n>_properties.json
                kafka_system_test_utils.generate_overriden_props_files(self.testSuiteAbsPathName,
                    self.testcaseEnv, self.systemTestEnv)
                # =============================================
                # preparing all entities to start the test
                # =============================================
                self.log_message("starting zookeepers")
                kafka_system_test_utils.start_zookeepers(self.systemTestEnv, self.testcaseEnv)
                self.anonLogger.info("sleeping for 2s")
                time.sleep(2)
                self.log_message("starting brokers")
                kafka_system_test_utils.start_brokers(self.systemTestEnv, self.testcaseEnv)
                self.anonLogger.info("sleeping for 5s")
                time.sleep(5)
                if autoCreateTopic.lower() == "false":
                    self.log_message("creating topics")
                    kafka_system_test_utils.create_topic(self.systemTestEnv, self.testcaseEnv)
                    self.anonLogger.info("sleeping for 5s")
                    time.sleep(5)
                # =============================================
                # start ConsoleConsumer if this is a Log Retention test
                # =============================================
                if logRetentionTest.lower() == "true":
                    self.log_message("starting consumer in the background")
                    kafka_system_test_utils.start_console_consumer(self.systemTestEnv, self.testcaseEnv)
                    time.sleep(1)
                # =============================================
                # starting producer
                # =============================================
                self.log_message("starting producer in the background")
                kafka_system_test_utils.start_producer_performance(self.systemTestEnv, self.testcaseEnv, False)
                msgProducingFreeTimeSec = self.testcaseEnv.testcaseArgumentsDict["message_producing_free_time_sec"]
                self.anonLogger.info("sleeping for " + msgProducingFreeTimeSec + " sec to produce some messages")
                time.sleep(int(msgProducingFreeTimeSec))
                # =============================================
                # A while-loop to bounce leader as specified
                # by "num_iterations" in testcase_n_properties.json
                # =============================================
                i = 1
                numIterations = int(self.testcaseEnv.testcaseArgumentsDict["num_iteration"])
                brokerType = self.testcaseEnv.testcaseArgumentsDict["broker_type"]
                bounceBrokerFlag = self.testcaseEnv.testcaseArgumentsDict["bounce_broker"]
                while i <= numIterations:
                    self.log_message("Iteration " + str(i) + " of " + str(numIterations))
                    self.log_message("bounce_broker flag : " + bounceBrokerFlag)
                    leaderDict = None
                    controllerDict = None
                    stoppedBrokerEntityId = ""
                    # ==============================================
                    # Find out the entity id for the stopping broker
                    # ==============================================
                    if brokerType == "leader" or brokerType == "follower":
                        self.log_message("looking up leader")
                        leaderDict = kafka_system_test_utils.get_leader_attributes(self.systemTestEnv, self.testcaseEnv)
                        # ==========================
                        # leaderDict looks like this:
                        # ==========================
                        #{'entity_id': u'3',
                        # 'partition': '0',
                        # 'timestamp': 1345050255.8280001,
                        # 'hostname': u'localhost',
                        # 'topic': 'test_1',
                        # 'brokerid': '3'}
                        if brokerType == "leader":
                            stoppedBrokerEntityId = leaderDict["entity_id"]
                            self.log_message("Found leader with entity id: " + stoppedBrokerEntityId)
                        else: # Follower
                            self.log_message("looking up follower")
                            # a list of all brokers
                            brokerEntityIdList = system_test_utils.get_data_from_list_of_dicts(self.systemTestEnv.clusterEntityConfigDictList, "role", "broker", "entity_id")
                            # we pick the first non-leader broker as the follower
                            firstFollowerEntityId = None
                            for brokerEntityId in brokerEntityIdList:
                                if brokerEntityId != leaderDict["entity_id"]:
                                    firstFollowerEntityId = brokerEntityId
                                    break
                            stoppedBrokerEntityId = firstFollowerEntityId
                            self.log_message("Found follower with entity id: " + stoppedBrokerEntityId)
                    elif brokerType == "controller":
                        self.log_message("looking up controller")
                        controllerDict = kafka_system_test_utils.get_controller_attributes(self.systemTestEnv, self.testcaseEnv)
                        # ==========================
                        # controllerDict looks like this:
                        # ==========================
                        #{'entity_id': u'3',
                        # 'timestamp': 1345050255.8280001,
                        # 'hostname': u'localhost',
                        # 'brokerid': '3'}
                        stoppedBrokerEntityId = controllerDict["entity_id"]
                        self.log_message("Found controller with entity id: " + stoppedBrokerEntityId)
                    # =============================================
                    # Bounce the broker
                    # =============================================
                    if bounceBrokerFlag.lower() == "true":
                        if brokerType == "leader":
                            # validate to see if leader election is successful
                            self.log_message("validating leader election")
                            kafka_system_test_utils.validate_leader_election_successful(self.testcaseEnv, leaderDict, self.testcaseEnv.validationStatusDict)
                            # trigger leader re-election by stopping leader to get re-election latency
                            #reelectionLatency = kafka_system_test_utils.get_reelection_latency(self.systemTestEnv, self.testcaseEnv, leaderDict, self.leaderAttributesDict)
                            #latencyKeyName = "Leader Election Latency - iter " + str(i) + " brokerid " + leaderDict["brokerid"]
                            #self.testcaseEnv.validationStatusDict[latencyKeyName] = str("{0:.2f}".format(reelectionLatency * 1000)) + " ms"
                            #self.testcaseEnv.userDefinedEnvVarDict["leaderElectionLatencyList"].append("{0:.2f}".format(reelectionLatency * 1000))
                        elif brokerType == "follower":
                            # stopping Follower
                            self.log_message("stopping follower with entity id: " + firstFollowerEntityId)
                            kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, firstFollowerEntityId, self.testcaseEnv.entityBrokerParentPidDict[firstFollowerEntityId])
                        elif brokerType == "controller":
                            # stopping Controller
                            self.log_message("stopping controller : " + controllerDict["brokerid"])
                            kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, controllerDict["entity_id"], self.testcaseEnv.entityBrokerParentPidDict[controllerDict["entity_id"]])
                        brokerDownTimeInSec = 5
                        try:
                            brokerDownTimeInSec = int(self.testcaseEnv.testcaseArgumentsDict["broker_down_time_in_sec"])
                        except:
                            pass # take default
                        time.sleep(brokerDownTimeInSec)
                        # starting previously terminated broker
                        self.log_message("starting the previously terminated broker")
                        kafka_system_test_utils.start_entity_in_background(self.systemTestEnv, self.testcaseEnv, stoppedBrokerEntityId)
                    else:
                        # GC Pause simulation
                        pauseTime = None
                        try:
                            hostname = leaderDict["hostname"]
                            pauseTime = self.testcaseEnv.testcaseArgumentsDict["pause_time_in_seconds"]
                            parentPid = self.testcaseEnv.entityBrokerParentPidDict[leaderDict["entity_id"]]
                            pidStack = system_test_utils.get_remote_child_processes(hostname, parentPid)
                            system_test_utils.simulate_garbage_collection_pause_in_remote_process(hostname, pidStack, pauseTime)
                        except:
                            pass
                    self.anonLogger.info("sleeping for 60s")
                    time.sleep(60)
                    i += 1
                # while loop
                # update Leader Election Latency MIN/MAX to testcaseEnv.validationStatusDict
                #self.testcaseEnv.validationStatusDict["Leader Election Latency MIN"] = None
                #try:
                #    self.testcaseEnv.validationStatusDict["Leader Election Latency MIN"] = \
                #        min(self.testcaseEnv.userDefinedEnvVarDict["leaderElectionLatencyList"])
                #except:
                #    pass
                #
                #self.testcaseEnv.validationStatusDict["Leader Election Latency MAX"] = None
                #try:
                #    self.testcaseEnv.validationStatusDict["Leader Election Latency MAX"] = \
                #        max(self.testcaseEnv.userDefinedEnvVarDict["leaderElectionLatencyList"])
                #except:
                #    pass
                # =============================================
                # tell producer to stop
                # =============================================
                self.testcaseEnv.lock.acquire()
                self.testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"] = True
                time.sleep(1)
                self.testcaseEnv.lock.release()
                time.sleep(1)
                # =============================================
                # wait for producer thread's update of
                # "backgroundProducerStopped" to be "True"
                # =============================================
                while 1:
                    self.testcaseEnv.lock.acquire()
                    self.logger.info("status of backgroundProducerStopped : [" + \
                        str(self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"]) + "]", extra=self.d)
                    if self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"]:
                        time.sleep(1)
                        self.logger.info("all producer threads completed", extra=self.d)
                        break
                    time.sleep(1)
                    self.testcaseEnv.lock.release()
                time.sleep(2)
                # =============================================
                # collect logs from remote hosts to find the
                # minimum common offset of a certain log
                # segment file among all replicas
                # =============================================
                minStartingOffsetDict = None
                if logRetentionTest.lower() == "true":
                    self.anonLogger.info("sleeping for 60s to make sure log truncation is completed")
                    time.sleep(60)
                    kafka_system_test_utils.collect_logs_from_remote_hosts(self.systemTestEnv, self.testcaseEnv)
                    minStartingOffsetDict = kafka_system_test_utils.getMinCommonStartingOffset(self.systemTestEnv, self.testcaseEnv)
                    print
                    pprint.pprint(minStartingOffsetDict)
                # =============================================
                # starting debug consumer
                # =============================================
                if consumerMultiTopicsMode.lower() == "false":
                    self.log_message("starting debug consumers in the background")
                    kafka_system_test_utils.start_simple_consumer(self.systemTestEnv, self.testcaseEnv, minStartingOffsetDict)
                    self.anonLogger.info("sleeping for 10s")
                    time.sleep(10)
                # =============================================
                # starting console consumer
                # =============================================
                if logRetentionTest.lower() == "false":
                    self.log_message("starting consumer in the background")
                    kafka_system_test_utils.start_console_consumer(self.systemTestEnv, self.testcaseEnv)
                    time.sleep(10)
                # =============================================
                # this testcase is completed - stop all entities
                # =============================================
                self.log_message("stopping all entities")
                for entityId, parentPid in self.testcaseEnv.entityBrokerParentPidDict.items():
                    kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, entityId, parentPid)
                for entityId, parentPid in self.testcaseEnv.entityZkParentPidDict.items():
                    kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, entityId, parentPid)
                # make sure all entities are stopped
                kafka_system_test_utils.ps_grep_terminate_running_entity(self.systemTestEnv)
                # =============================================
                # collect logs from remote hosts
                # =============================================
                kafka_system_test_utils.collect_logs_from_remote_hosts(self.systemTestEnv, self.testcaseEnv)
                # =============================================
                # validate the data matched and checksum
                # =============================================
                self.log_message("validating data matched")
                if logRetentionTest.lower() == "true":
                    kafka_system_test_utils.validate_data_matched(self.systemTestEnv, self.testcaseEnv, replicationUtils)
                elif consumerMultiTopicsMode.lower() == "true":
                    kafka_system_test_utils.validate_data_matched_in_multi_topics_from_single_consumer_producer(
                        self.systemTestEnv, self.testcaseEnv, replicationUtils)
                else:
                    kafka_system_test_utils.validate_simple_consumer_data_matched_across_replicas(self.systemTestEnv, self.testcaseEnv)
                    kafka_system_test_utils.validate_broker_log_segment_checksum(self.systemTestEnv, self.testcaseEnv)
                    kafka_system_test_utils.validate_data_matched(self.systemTestEnv, self.testcaseEnv, replicationUtils)
                kafka_system_test_utils.validate_index_log(self.systemTestEnv, self.testcaseEnv)
                # =============================================
                # draw graphs
                # =============================================
                metrics.draw_all_graphs(self.systemTestEnv.METRICS_PATHNAME,
                                        self.testcaseEnv,
                                        self.systemTestEnv.clusterEntityConfigDictList)
                # build dashboard, one for each role
                metrics.build_all_dashboards(self.systemTestEnv.METRICS_PATHNAME,
                                             self.testcaseEnv.testCaseDashboardsDir,
                                             self.systemTestEnv.clusterEntityConfigDictList)
            except Exception as e:
                self.log_message("Exception while running test {0}".format(e))
                traceback.print_exc()
            finally:
                if not skipThisTestCase and not self.systemTestEnv.printTestDescriptionsOnly:
                    self.log_message("stopping all entities - please wait ...")
                    kafka_system_test_utils.stop_all_remote_running_processes(self.systemTestEnv, self.testcaseEnv)
| |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
"""
This module contains convertors for converting MODS to XML required by URN\:NBN
project.
See:
- http://resolver.nkp.cz/api/v3/digDocRegistration.xsd
- https://code.google.com/p/czidlo/wiki/ApiV3
"""
# Imports =====================================================================
import xmltodict
import dhtmlparser
from dhtmlparser import first
from xml_composer import MonographComposer
from xml_composer import MultiMonoComposer
# Functions & classes =========================================================
def pick_only_text(fn):
    """
    Decorator. When the wrapped function returns a dict-like object with an
    ``"#text"`` key (as produced by ``xmltodict``), unwrap and return just
    that text. Falsy results and plain values are passed through unchanged.
    """
    # Local import keeps this module's import block untouched.
    from functools import wraps

    @wraps(fn)  # preserve the wrapped function's name/docstring
    def get_only_text(*args, **kwargs):
        out = fn(*args, **kwargs)
        if not out:
            return out
        if "#text" in out:
            return out["#text"]
        return out
    return get_only_text
def first_or_none(fn):
    """
    Decorator. Take the wrapped function's result, return the UTF-8 decoded
    content of its first element, or ``None`` when the result is empty/falsy.
    """
    def get_first_or_none(*args, **kwargs):
        result = fn(*args, **kwargs)
        if result:
            return first(result).getContent().decode("utf-8")
        return None
    return get_first_or_none
class MonographPublication(object):
    """
    This class accepts MODS monographic data, which can then convert to XML
    for URN\:NBN.
    """
    def __init__(self, mods_xml):
        # Keep both representations: dhtmlparser DOM for path matching and
        # xmltodict dicts for key-based access.
        self.mods_xml = mods_xml
        self.dom = dhtmlparser.parseString(mods_xml)
        self.xdom = xmltodict.parse(mods_xml)
        self.composer = MonographComposer()
    def _get_title_info(self):
        # The main title lives in <mods:titleInfo> without @type="alternative".
        return self.dom.wfind(
            "mods:mods"
        ).wfind(
            "mods:titleInfo",
            fn=lambda x: x.params.get("type") != "alternative"
        )
    @first_or_none
    def get_title(self):
        """
        Returns:
            str: Title

        Raises:
            ValueError: If the <mods:title> element is not found.
        """
        titles = self._get_title_info().find("mods:title")
        if not titles:
            # BUG FIX: the ValueError was previously constructed but never
            # raised, so a missing title silently became None.
            raise ValueError("<mods:title> element not found.")
        return titles
    @first_or_none
    def get_subtitle(self):
        """
        Returns:
            str: Subtitle
        """
        return self._get_title_info().find("mods:subTitle")
    @first_or_none
    def get_author(self):
        """
        Returns:
            str: Author's name.
        """
        # Prefer the primary personal name (without the "date" name part);
        # fall back to corporate, then conference names.
        author = self.dom.match(
            "mods:mods",
            ["mods:name", {"type": "personal", "usage": "primary"}],
            {
                "tag_name": "mods:namePart",
                "fn": lambda x: x.params.get("type", "") != "date"
            }
        )
        if not author:
            author = self.dom.match(
                "mods:mods",
                ["mods:name", {"type": "corporate"}]
            )
        if not author:
            author = self.dom.match(
                "mods:mods",
                ["mods:name", {"type": 'conference'}]
            )
        return author
    def get_form(self):
        """
        Returns:
            str: Form of the book. Electronic source, and so on..
        """
        forms = self.dom.match(
            "mods:mods",
            "mods:physicalDescription",
            {
                "tag_name": "mods:form",
                "fn": lambda x: x.params.get("authority", "") == "gmd"
            }
        )
        if not forms:
            return
        return forms[0].getContent().decode("utf-8")
    def _get_description(self):
        # <mods:originInfo> subtree (publisher/place/date) or None.
        return self.xdom["mods:mods"].get("mods:originInfo", None)
    def get_place(self):
        """
        Returns:
            str: Place where the book was released.
        """
        place = self.dom.match(
            "mods:originInfo",
            "mods:place",
            ["mods:placeTerm", {"type": "text"}]
        )
        if not place:
            return
        return place[0].getContent().decode("utf-8")
    @pick_only_text
    def get_publisher(self):
        """
        Returns:
            str: Name of the publisher.
        """
        if not self._get_description():
            return
        return self._get_description().get("mods:publisher", None)
    @pick_only_text
    def get_year(self):
        """
        Returns:
            str: Year when the book was released.
        """
        if not self._get_description():
            return
        return self._get_description().get("mods:dateIssued", None)
    @pick_only_text
    def get_identifier(self, name):
        """
        Returns:
            str: Identifier from ``<mods:identifier>`` which has \
                 ``@type == name``.
        """
        # List comprehension instead of filter() so that indexing below
        # works under both Python 2 and Python 3 (filter() is lazy in 3.x).
        identifier = [
            x for x in self.xdom["mods:mods"].get("mods:identifier", [])
            if x.get("@type", False) == name
        ]
        if not identifier:
            return
        return identifier[0]
    @pick_only_text
    def get_ccnb(self):
        """
        Returns:
            str: CCNB identification string.
        """
        return self.get_identifier("ccnb")
    @pick_only_text
    def get_isbn(self):
        """
        Returns:
            str: ISBN.
        """
        return self.get_identifier("isbn")
    @pick_only_text
    def get_uuid(self):
        """
        Returns:
            str: UUID.
        """
        return self.get_identifier("uuid")
    def compose(self):
        """
        Convert `self` to nested ordered dicts, which may be serialized to XML
        using ``xmltodict`` module.
        Returns:
            OrderedDict: XML parsed to ordered dicts.
        """
        self.composer.title = self.get_title()
        self.composer.subtitle = self.get_subtitle()
        self.composer.ccnb = self.get_ccnb()
        self.composer.isbn = self.get_isbn()
        self.composer.other_id = self.get_uuid()
        self.composer.document_type = self.get_form()
        self.composer.digital_born = True
        self.composer.author = self.get_author()
        self.composer.place = self.get_place()
        self.composer.publisher = self.get_publisher()
        self.composer.year = self.get_year()
    def add_format(self, file_format):
        """
        Add informations about `file_format` to internal XML dict.
        Args:
            file_format (str): ``PDF``, ``jpeg``, etc..
        """
        self.composer.format = file_format
    def to_xml(self):
        """
        Convert itself to XML unicode string.
        Returns:
            unicode: XML.
        """
        self.compose()
        return self.composer.to_xml()
    def __str__(self):
        return self.to_xml()
class MonographVolume(MonographPublication):
    """
    Conversion of Multi-monograph data to XML required by URN\:NBN.
    """
    def __init__(self, mods_xml):
        super(MonographVolume, self).__init__(mods_xml)
        # Multi-monograph volumes serialize through a dedicated composer.
        self.composer = MultiMonoComposer()
    @pick_only_text
    def get_volume_title(self):
        """
        Returns:
            str: Title of the whole volume.

        Raises:
            ValueError: When neither <mods:partNumber> nor <mods:partName>
                is present.
        """
        part_elements = self.dom.match(
            "mods:mods",
            "mods:titleInfo",
            "mods:partNumber"
        )
        if not part_elements:
            # Fall back to the part name when no part number is given.
            part_elements = self.dom.match(
                "mods:mods",
                "mods:titleInfo",
                "mods:partName"
            )
        if not part_elements:
            raise ValueError("Can't find volumeTitle!")
        return part_elements[0].getContent().decode("utf-8")
    def compose(self):
        """
        Convert `self` to nested ordered dicts, which may be serialized to XML
        using ``xmltodict`` module.
        Returns:
            OrderedDict: XML parsed to ordered dicts.
        """
        super(MonographVolume, self).compose()
        self.composer.volume_title = self.get_volume_title()
def convert_mono_xml(mods_xml, file_format):
    """
    Convert MODS monograph record to XML, which is required by URN:NBN
    resolver.
    Args:
        mods_xml (str): MODS volume XML.
        file_format (str): ``PDF``, ``jpeg``, etc..
    Returns:
        str: XML for URN:NBN resolver.
    Raises:
        ValueError: If can't find required data in MODS (author, title).
    """
    publication = MonographPublication(mods_xml)
    publication.add_format(file_format)
    return publication.to_xml()
def convert_mono_volume_xml(mods_volume_xml, file_format):
    """
    Convert MODS monograph, multi-volume record to XML, which is required by
    URN:NBN resolver.
    Args:
        mods_volume_xml (str): MODS volume XML.
        file_format (str): ``PDF``, ``jpeg``, etc..
    Returns:
        str: XML for URN:NBN resolver.
    Raises:
        ValueError: If can't find required data in MODS (author, title).
    """
    volume = MonographVolume(mods_volume_xml)
    volume.add_format(file_format)
    return volume.to_xml()
| |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for classification of CodeEvaluation."""
__author__ = 'Koji Ashida'
from core.tests import test_utils
from extensions.rules import code_evaluation
class CodeNormalizationUnitTests(test_utils.GenericTestBase):
    """Tests the normalization of code strings."""
    # Each entry maps a raw code string ('before') to the expected output of
    # code_evaluation.normalize_code ('after'). Whitespace inside these
    # literals is significant -- it is exactly what is being tested.
    TEST_DATA = [{
        'before': (
            'def x():\n'
            '    y = 345'),
        'after': (
            'def x():\n'
            '    y = 345'),
    }, {
        # Indentation gets converted to 4 spaces. Trailing whitespace and empty
        # lines are removed.
        'before': (
            'def x():         \n'
            '        \n'
            '  y = 345\n'
            '       \n'
            '     '),
        'after': (
            'def x():\n'
            '    y = 345'),
    }, {
        # Full-line comments are removed, but not comments starting in the
        # middle of a line.
        'before': (
            '# This is a comment.\n'
            '     # This is a comment with some spaces before it.\n'
            'def x():   # And a comment with some code before it.\n'
            '  y = \'#String with hashes#\''),
        'after': (
            'def x():   # And a comment with some code before it.\n'
            '    y = \'#String with hashes#\''),
    }, {
        # Complex indentation is handled correctly.
        'before': (
            'abcdefg\n'
            '    hij\n'
            '              ppppp\n'
            'x\n'
            '  abc\n'
            '    bcd\n'
            '  cde\n'
            '   xxxxx\n'
            '  y\n'
            ' z'),
        'after': (
            'abcdefg\n'
            '    hij\n'
            '        ppppp\n'
            'x\n'
            '    abc\n'
            '        bcd\n'
            '    cde\n'
            '        xxxxx\n'
            '    y\n'
            'z'),
    }]
    def test_code_normalization(self):
        """Every 'before' fixture must normalize to its 'after' form."""
        for test in self.TEST_DATA:
            self.assertEqual(
                code_evaluation.normalize_code(test['before']), test['after'])
class CodeEvaluationRuleUnitTests(test_utils.GenericTestBase):
    """Tests for rules operating on CodeEvaluation objects."""
    def test_code_equals_rule(self):
        """CodeEquals tolerates trailing whitespace and blank lines but is
        sensitive to leading indentation and in-string changes."""
        rule = code_evaluation.CodeEquals(
            'def x():\n'
            '    y = \'ab    c\'\n'
            '    return x')
        self.assertFuzzyTrue(rule.eval({
            'code': (
                'def x():\n'
                '    y = \'ab    c\'\n'
                '    return x'
            ),
            'output': 'Original code',
            'evaluation': '',
            'error': ''
        }))
        self.assertFuzzyTrue(rule.eval({
            'code': (
                'def x():\n'
                '    y = \'ab    c\'\n'
                '              \n'
                '    return x'
            ),
            'output': 'Extra newline with spaces',
            'evaluation': '',
            'error': ''
        }))
        self.assertFuzzyTrue(rule.eval({
            'code': (
                'def x():    \n'
                '    y = \'ab    c\'\n'
                '    return x'
            ),
            'output': 'Extra trailing whitespace on first line',
            'evaluation': '',
            'error': ''
        }))
        self.assertFuzzyTrue(rule.eval({
            'code': (
                'def x():  \t\n'
                '    y = \'ab    c\'\n'
                '    return x\n\n\n'
            ),
            'output': 'Extra trailing whitespace; tab character in first line',
            'evaluation': '',
            'error': ''
        }))
        self.assertFuzzyFalse(rule.eval({
            'code': (
                'def x():\n'
                '  y = \'ab    c\'\n'
                '    return x'
            ),
            'output': 'Changing spaces at start of a line',
            'evaluation': '',
            'error': ''
        }))
        self.assertFuzzyFalse(rule.eval({
            'code': (
                'def x():'
                '    y = \'ab    c\'\n'
                '    return x'
            ),
            'output': 'Missing newline in first line',
            'evaluation': '',
            'error': ''
        }))
        self.assertFuzzyFalse(rule.eval({
            'code': (
                'def x():'
                '    y = \'ab c\'\n'
                '    return x'
            ),
            'output': 'Changing spaces inside quotes',
            'evaluation': '',
            'error': ''
        }))
    def test_output_equals_rule(self):
        """OutputEquals compares the 'output' field, ignoring surrounding
        whitespace."""
        rule = code_evaluation.OutputEquals('1')
        self.assertFuzzyTrue(rule.eval({
            'code': 'hello',
            'output': '1',
            'evaluation': '',
            'error': ''
        }))
        self.assertFuzzyTrue(rule.eval({
            'code': 'hello',
            'output': '\n1\n      ',
            'evaluation': '',
            'error': ''
        }))
        self.assertFuzzyFalse(rule.eval({
            'code': 'hello',
            'output': '',
            'evaluation': '',
            'error': ''
        }))
        self.assertFuzzyFalse(rule.eval({
            'code': 'hello',
            'output': 'bad output',
            'evaluation': '',
            'error': ''
        }))
    def test_fuzzy_matches_rule(self):
        """FuzzyMatches normalizes whitespace/comments but not identifiers."""
        rule = code_evaluation.FuzzyMatches([{
            'code': 'def func():\n    return 1\nprint func()',
            'output': '1',
            'evaluation': '',
            'error': ''
        }])
        # The same code should match.
        self.assertFuzzyTrue(rule.eval({
            'code': 'def func():\n    return 1\nprint func()',
            'output': '1',
            'evaluation': '',
            'error': ''
        }))
        # Extra whitespacing should not matter for the fuzzy match.
        self.assertFuzzyTrue(rule.eval({
            'code': '\ndef func():\n    return 1\n\n\nprint func()\n',
            'output': '1',
            'evaluation': '',
            'error': ''
        }))
        # Comments should make no difference for the comparison.
        self.assertFuzzyTrue(rule.eval({
            'code': (
                '# A func that returns 1.\ndef func():\n    return 1\n\n# Now '
                'print it.\nprint func()'),
            'output': '1',
            'evaluation': '',
            'error': ''
        }))
        # Renaming the identifiers should fail due to the current fuzzy rule
        # not doing very intelligent normalization.
        self.assertFuzzyFalse(rule.eval({
            'code': 'def ret_one():\n    return 1\nprint ret_one()',
            'output': '1',
            'evaluation': '',
            'error': ''
        }))
        # Different code should not match.
        self.assertFuzzyFalse(rule.eval({
            'code': 'print (1+2)',
            'output': '1',
            'evaluation': '',
            'error': ''
        }))
| |
"""TestCases for checking that it does not segfault when a DBEnv object
is closed before its DB objects.
"""
import os, sys
import unittest
from test_all import db, test_support, verbose, get_new_environment_path, get_new_database_path
# We're going to get warnings in this module about trying to close the db when
# its env is already closed. Let's just ignore those.
try:
import warnings
except ImportError:
pass
else:
warnings.filterwarnings('ignore',
message='DB could not be closed in',
category=RuntimeWarning)
#----------------------------------------------------------------------
class DBEnvClosedEarlyCrash(unittest.TestCase):
def setUp(self):
self.homeDir = get_new_environment_path()
self.filename = "test"
def tearDown(self):
test_support.rmtree(self.homeDir)
def test01_close_dbenv_before_db(self):
dbenv = db.DBEnv()
dbenv.open(self.homeDir,
db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
0666)
d = db.DB(dbenv)
d2 = db.DB(dbenv)
d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
self.assertRaises(db.DBNoSuchFileError, d2.open,
self.filename+"2", db.DB_BTREE, db.DB_THREAD, 0666)
d.put("test","this is a test")
self.assertEqual(d.get("test"), "this is a test", "put!=get")
dbenv.close() # This "close" should close the child db handle also
self.assertRaises(db.DBError, d.get, "test")
def test02_close_dbenv_before_dbcursor(self):
dbenv = db.DBEnv()
dbenv.open(self.homeDir,
db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
0666)
d = db.DB(dbenv)
d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
d.put("test","this is a test")
d.put("test2","another test")
d.put("test3","another one")
self.assertEqual(d.get("test"), "this is a test", "put!=get")
c=d.cursor()
c.first()
c.next()
d.close() # This "close" should close the child db handle also
# db.close should close the child cursor
self.assertRaises(db.DBError,c.next)
d = db.DB(dbenv)
d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
c=d.cursor()
c.first()
c.next()
dbenv.close()
# The "close" should close the child db handle also, with cursors
self.assertRaises(db.DBError, c.next)
def test03_close_db_before_dbcursor_without_env(self):
import os.path
path=os.path.join(self.homeDir,self.filename)
d = db.DB()
d.open(path, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
d.put("test","this is a test")
d.put("test2","another test")
d.put("test3","another one")
self.assertEqual(d.get("test"), "this is a test", "put!=get")
c=d.cursor()
c.first()
c.next()
d.close()
# The "close" should close the child db handle also
self.assertRaises(db.DBError, c.next)
def test04_close_massive(self):
dbenv = db.DBEnv()
dbenv.open(self.homeDir,
db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
0666)
dbs=[db.DB(dbenv) for i in xrange(16)]
cursors=[]
for i in dbs :
i.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
dbs[10].put("test","this is a test")
dbs[10].put("test2","another test")
dbs[10].put("test3","another one")
self.assertEqual(dbs[4].get("test"), "this is a test", "put!=get")
for i in dbs :
cursors.extend([i.cursor() for j in xrange(32)])
for i in dbs[::3] :
i.close()
for i in cursors[::3] :
i.close()
# Check for missing exception in DB! (after DB close)
self.assertRaises(db.DBError, dbs[9].get, "test")
# Check for missing exception in DBCursor! (after DB close)
self.assertRaises(db.DBError, cursors[101].first)
cursors[80].first()
cursors[80].next()
dbenv.close() # This "close" should close the child db handle also
# Check for missing exception! (after DBEnv close)
self.assertRaises(db.DBError, cursors[80].next)
def test05_close_dbenv_delete_db_success(self):
dbenv = db.DBEnv()
dbenv.open(self.homeDir,
db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
0666)
d = db.DB(dbenv)
d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
dbenv.close() # This "close" should close the child db handle also
del d
try:
import gc
except ImportError:
gc = None
if gc:
# force d.__del__ [DB_dealloc] to be called
gc.collect()
def test06_close_txn_before_dup_cursor(self) :
    """Committing a transaction must close cursors opened under it,
    including duplicated cursors, which then raise DBCursorClosedError.
    """
    dbenv = db.DBEnv()
    dbenv.open(self.homeDir,db.DB_INIT_TXN | db.DB_INIT_MPOOL |
               db.DB_INIT_LOG | db.DB_CREATE)
    d = db.DB(dbenv)
    txn = dbenv.txn_begin()
    d.open(self.filename, dbtype = db.DB_HASH, flags = db.DB_CREATE,
           txn=txn)
    d.put("XXX", "yyy", txn=txn)
    txn.commit()
    txn = dbenv.txn_begin()
    c1 = d.cursor(txn)
    c2 = c1.dup()
    self.assertEquals(("XXX", "yyy"), c1.first())
    # Not interested in warnings about implicit close.
    import warnings
    if sys.version_info < (2, 6) :
        # Completely resetting the warning state is
        # problematic with python >=2.6 with -3 (py3k warning),
        # because some stdlib modules selectively ignores warnings.
        warnings.simplefilter("ignore")
        txn.commit()
        warnings.resetwarnings()
    else :
        # When we drop support for python 2.3 and 2.4
        # we could use: (in 2.5 we need a __future__ statement)
        #
        # with warnings.catch_warnings():
        #     warnings.simplefilter("ignore")
        #     txn.commit()
        #
        # We can not use "with" as is, because it would be invalid syntax
        # in python 2.3, 2.4 and (with no __future__) 2.5.
        # Here we simulate "with" following PEP 343 :
        w = warnings.catch_warnings()
        w.__enter__()
        try :
            warnings.simplefilter("ignore")
            txn.commit()
        finally :
            w.__exit__()
    # The duplicated cursor was implicitly closed by the commit.
    self.assertRaises(db.DBCursorClosedError, c2.first)
# DBSequence support only exists in Berkeley DB newer than 4.3, so the
# test method is defined conditionally at class-body evaluation time.
if db.version() > (4,3,0) :
    def test07_close_db_before_sequence(self):
        """Closing a DB must also close a DBSequence created from it."""
        import os.path
        path=os.path.join(self.homeDir,self.filename)
        d = db.DB()
        d.open(path, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
        dbs=db.DBSequence(d)
        d.close()  # This "close" should close the child DBSequence also
        dbs.close()  # If not closed, core dump (in Berkeley DB 4.6.*)
#----------------------------------------------------------------------
def test_suite():
    """Assemble and return the unittest suite for this module."""
    return unittest.TestSuite([unittest.makeSuite(DBEnvClosedEarlyCrash)])
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| |
#!/usr/bin/python
from Tkinter import *
import ttk
from PIL import Image, ImageTk
import os
import re
import csv
def selectExp(event):
    # Module-level Tk callback: delegate to the global Application instance.
    app.selectExp(event)
def closeTab(event):
    # Module-level Tk callback: middle-click on a notebook tab closes it.
    app.closeTabMiddleClick(event)
class Application(Frame):
    """Tk GUI for browsing processed XRD experiment data.

    A Treeview on the left lists experiments scraped from
    ``XRD/Processed`` (file names match ``<semester><number><name>.<ext>``);
    each selected experiment opens a Notebook tab showing processed images,
    an R-generated graph, and editable notes persisted under ``data/``.
    """

    def scrapeExperiments(self):
        """Scan XRD/Processed and build semester/number/experiment lists."""
        listall = os.listdir("XRD/Processed")
        # Groups: (1) semester, (2) number, (3) experiment name, (4) extension.
        self.files = [re.match(r'(...)(...)([^.]*)\.(...)',f) for f in listall]
        for m in self.files:
            if not m:
                print "File not recognized"
        self.files = [m for m in self.files if m]
        # Sort semesters by year first, then letter descending (e.g. S before F).
        self.semesters = sorted(list(set([m.group(1) for m in self.files])),lambda x,y: (cmp(int(x[1:]),int(y[1:])) if not x[1:] == y[1:] else cmp(y[0],x[0])))
        self.numbers = []
        self.exps = []
        for sem in self.semesters:
            self.numbers.append(sorted(list(set([m.group(2) for m in self.files if m.group(1) == sem])),lambda x,y: cmp(int(x),int(y))))
            self.exps.append([])
            for n in self.numbers[-1]:
                # Only .txt files define an experiment entry.
                self.exps[-1].append(sorted(list(set([m.group(3) for m in self.files if m.group(1) == sem and m.group(2) == n and m.group(4) == 'txt']))))

    def createWidgets(self):
        """Build the experiment tree, the notebook, and the control buttons."""
        self.chooseTrial = ttk.Treeview(self)
        self.fillExperiments(self.chooseTrial)
        self.chooseTrial.bind('<Double-Button-1>',selectExp)
        self.chooseTrial.bind('<Button-3>',self.overlayExp)
        self.chooseTrial["height"] = 35
        self.chooseTrial.pack(side=LEFT)
        self.nb = ttk.Notebook(self)
        self.makeTabs(self.nb)
        for title,tab in self.tabs:
            self.nb.add(tab, text=title)
        self.nb.bind("<Button-2>",closeTab)
        self.nb.enable_traversal()
        self.nb.pack()
        self.closeTabButton = Button(self)
        self.closeTabButton["text"] = "Close Tab"
        self.closeTabButton["command"] = self.closeTab
        self.closeTabButton.pack()
        self.quitButton = Button(self)
        self.quitButton["text"] = "Quit"
        self.quitButton["command"] = self.quit
        self.quitButton.pack()

    def closeTab(self):
        """Close the currently selected notebook tab."""
        self.nb.forget("current")

    def closeTabMiddleClick(self,event):
        """Close the tab under the mouse pointer (middle-click handler)."""
        self.nb.forget("@%d,%d" % (event.x,event.y))

    def dispExp(self):
        """Create and add tabs for the current ``self.fileroot``."""
        # self.trialLabel["text"] = self.fileroot
        self.makeTabs(self.nb)
        for title,tab in self.tabs:
            self.nb.add(tab, text=title)

    def fillExperiments(self,tree):
        """Populate the Treeview with semester -> number -> experiment nodes."""
        root = tree.insert('','end',text='Experiments',open=True)
        for sem,num,exp in zip(self.semesters,self.numbers,self.exps):
            sid = tree.insert(root,'end',text=sem,open=True)
            for n,ex in zip(num,exp):
                nid = tree.insert(sid,'end',text=n,open=True)
                for e in ex:
                    tree.insert(nid,'end',text=e,open=True)

    def selectExp(self,event):
        """Open a tab for every experiment under the double-clicked node.

        The nesting depth of the clicked node (root / semester / number /
        experiment) determines how many filters are passed to hijitosOf.
        """
        tree = event.widget
        node = tree.focus()
        if tree.parent(node):
            if tree.parent(tree.parent(node)):
                if tree.parent(tree.parent(tree.parent(node))):
                    for file in self.hijitosOf(tree.item(tree.parent(tree.parent(node)))["text"], tree.item(tree.parent(node))["text"], tree.item(node)["text"]):
                        self.fileroot = file
                        self.dispExp()
                else:
                    for file in self.hijitosOf(tree.item(tree.parent(node))["text"],tree.item(node)["text"]):
                        self.fileroot = file
                        self.dispExp()
            else:
                for file in self.hijitosOf(tree.item(node)["text"]):
                    self.fileroot = file
                    self.dispExp()
        else:
            for file in self.hijitosOf():
                self.fileroot = file
                self.dispExp()
        self.nb.select(self.tabs[0][1])

    def overlayExp(self,event):
        """Overlay the right-clicked experiments onto the current tab's plot.

        Re-runs the R plotting script with the extra trials, then replaces
        the current tab with the regenerated one.
        """
        tree = event.widget
        node = tree.identify_row(event.y)
        if tree.parent(node):
            if tree.parent(tree.parent(node)):
                if tree.parent(tree.parent(tree.parent(node))):
                    othertrials = " ".join(self.hijitosOf(tree.item(tree.parent(tree.parent(node)))["text"], tree.item(tree.parent(node))["text"], tree.item(node)["text"]))
                else:
                    othertrials = " ".join(self.hijitosOf(tree.item(tree.parent(node))["text"],tree.item(node)["text"]))
            else:
                othertrials = " ".join(self.hijitosOf(tree.item(node)["text"]))
        else:
            othertrials = " ".join(self.hijitosOf())
        if os.name == "nt":
            os.system(".\\runr.bat %s %s" % (self.fileroot,othertrials))
        else:
            os.system("Rscript createplot.r %s %s" % (self.fileroot,othertrials))
        self.fileroot = self.nb.tab(self.nb.select(),option="text")
        self.makeTabs(self.nb)
        self.nb.insert(self.nb.select(),self.tabs[0][1],text=self.fileroot)
        self.nb.forget("current")

    def hijitosOf(self, semester='', number='', experiment=''):
        """Return file roots matching the given filters ('' matches all)."""
        res = []
        for sem,num,exp in zip(self.semesters,self.numbers,self.exps):
            if semester == '' or semester == sem:
                for nu,ex in zip(num,exp):
                    if number == '' or number == nu:
                        for e in ex:
                            if experiment == '' or experiment == e:
                                res.append(sem + nu + e)
        return res

    def onModifiedXrd(self,widget,event):
        # Mark the save button as dirty when the notes text changes.
        widget["text"] = "Save*"

    def saveXrdNotes(self,notes,save,fileroot):
        """Write the XRD notes text widget out to data/<fileroot>_xrdnotes.txt."""
        notes.edit_modified(0)
        f = open("data/"+fileroot+"_xrdnotes.txt",'w')
        f.write(notes.get("0.0", "end"))
        f.close()
        save["text"] = "Save"

    def saveEdNotes(self,eN,rows,eS,fileroot,appendage):
        """Write the experiment-notes Entry grid to data/<fileroot>_exp<appendage>.csv.

        NOTE(review): ``eN`` and ``eS`` are accepted but never used here.
        """
        with open("data/"+fileroot+"_exp"+appendage+".csv",'w') as csvfile:
            csvwriter = csv.writer(csvfile,delimiter='\t',quotechar='|')
            for r in rows:
                csvwriter.writerow([e.get() for e in r])

    def makeTabs(self,nb):
        """Build the tab frame for the current fileroot: images, graph, notes."""
        self.tabs = []
        frame = Frame(nb)
        frame.grid()
        xrdProcessed = ttk.Notebook(frame)
        images = ["only", "all"]
        for img in images:
            image = Image.open("XRD/Processed/%s_%s.jpg" % (self.fileroot,img))
            image = image.resize((653,500), Image.ANTIALIAS)
            photo = ImageTk.PhotoImage(image)
            t = Frame(xrdProcessed)
            pic = Label(t,image=photo)
            pic["height"] = 500
            # Keep a reference so Tk does not garbage-collect the image.
            pic.image = photo
            pic.grid()
            xrdProcessed.add(t,text=img)
        xrdProcessed.grid(column = 0, row = 0)
        xrdNotes = Text(frame)
        xrdSave = Button(frame)
        xrdNotes["height"] = 5
        xrdNotes.bind("<<Modified>>",lambda e:self.onModifiedXrd(xrdSave,e))
        xrdNotes.bind("<FocusOut>",lambda e,f=self.fileroot:self.saveXrdNotes(xrdNotes,xrdSave,f))
        if os.path.isfile("data/%s_xrdnotes.txt" % self.fileroot):
            f = open("data/%s_xrdnotes.txt" % self.fileroot,'r')
            xrdNotes.insert("end",f.read())
            f.close()
            # Reset the modified flag set by the programmatic insert.
            xrdNotes.edit_modified(0)
        xrdNotes.grid(column = 0, row = 1)
        xrdSave["text"] = "Save"
        xrdSave["command"] = lambda f=self.fileroot:self.saveXrdNotes(xrdNotes,xrdSave,f)
        xrdSave.grid(column = 0, row = 2)
        # Regenerate the graph if it is missing or older than its inputs.
        if not os.path.isfile("data/%s_graph.png" % self.fileroot) or \
                os.path.getmtime("data/%s_graph.png" % self.fileroot) < os.path.getmtime("XRD/Processed/%s.txt" % self.fileroot) or \
                os.path.getmtime("data/%s_graph.png" % self.fileroot) < os.path.getmtime("createplot.r"):
            if os.name == "nt":
                os.system(".\\runr.bat %s" % self.fileroot)
            else:
                os.system("Rscript createplot.r %s" % self.fileroot)
        img = Image.open("data/%s_graph.png" % self.fileroot)
        graph = ImageTk.PhotoImage(img)
        xrdGraphs = Label(frame,image=graph)
        xrdGraphs.image = graph
        xrdGraphs.grid(column = 1, row = 0)
        experimentNotes = Frame(frame)
        experimentSave = Button(frame)
        # NOTE(review): fillExperimentNotes has no return statement, so
        # ``rows`` is always None here — verify intended behavior.
        rows = self.fillExperimentNotes(experimentNotes,experimentSave)
        experimentNotes.grid(column = 1, row = 1)
        experimentSave["text"] = "Save"
        # NOTE(review): this call does not match saveEdNotes' signature
        # (eN, rows, eS, fileroot, appendage) — arguments are out of order
        # and ``appendage`` is missing; invoking it would raise TypeError.
        experimentSave["command"] = lambda f=self.fileroot:self.saveEdNotes(experimentNotes,experimentSave,rows,f)
        # experimentNotes.grid(column = 1, row = 2)
        fesem = Label(frame)
        fesem["text"] = ["FESEM\n", "data"]
        fesem.grid(column = 2, row = 0)
        self.tabs.append( (self.fileroot, frame) )

    def fillExperimentNotes(self,eN,eS):
        """Build the two editable Entry grids from the exp1/exp2 CSV files,
        creating default CSVs on first use.
        """
        eN1 = Frame(eN)
        eN2 = Frame(eN)
        eN1.pack()
        eN2.pack()
        if not os.path.isfile("data/%s_exp1.csv" % self.fileroot):
            f = open("data/%s_exp1.csv" % self.fileroot,'w')
            f.write("1\ttesting\n2\tand stuff\n3\ttesting\n4\tand stuff\n5\ttesting\n6\tand stuff\n7\ttesting\n8\tand stuff")
            f.close()
        with open("data/%s_exp1.csv" % self.fileroot) as csvfile:
            exp1 = csv.reader(csvfile, delimiter="\t", quotechar = "|")
            rows1 = []
            for i,r in enumerate(exp1):
                rows1.append((Entry(eN1),Entry(eN1)))
                a,b = rows1[-1]
                a.bind("<FocusOut>",lambda e,f=self.fileroot:self.saveEdNotes(eN1,rows1,eS,f,"1"))
                b.bind("<FocusOut>",lambda e,f=self.fileroot:self.saveEdNotes(eN1,rows1,eS,f,"1"))
                a["width"] = 5
                a["justify"] = "right"
                a.insert("end",r[0])
                b.insert("end",r[1])
                a.grid(column = 0, row = i)
                b.grid(column = 1, row = i)
        if not os.path.isfile("data/%s_exp2.csv" % self.fileroot):
            f = open("data/%s_exp2.csv" % self.fileroot,'w')
            f.write("Act.\t12:00 AM\t01/01/2013\t1:00 AM\t01/01/2013\nCool\t1:00 AM\t01/01/2013\t2:00 AM\t01/01/2013")
            f.close()
        with open("data/%s_exp2.csv" % self.fileroot) as csvfile:
            exp2 = csv.reader(csvfile, delimiter="\t", quotechar = "|")
            rows2 = []
            for i,r in enumerate(exp2):
                rows2.append((Entry(eN2),Entry(eN2),Entry(eN2),Entry(eN2),Entry(eN2)))
                a,b,c,d,e = rows2[-1]
                a.bind("<FocusOut>",lambda e,f=self.fileroot:self.saveEdNotes(eN2,rows2,eS,f,"2"))
                b.bind("<FocusOut>",lambda e,f=self.fileroot:self.saveEdNotes(eN2,rows2,eS,f,"2"))
                c.bind("<FocusOut>",lambda e,f=self.fileroot:self.saveEdNotes(eN2,rows2,eS,f,"2"))
                d.bind("<FocusOut>",lambda e,f=self.fileroot:self.saveEdNotes(eN2,rows2,eS,f,"2"))
                e.bind("<FocusOut>",lambda e,f=self.fileroot:self.saveEdNotes(eN2,rows2,eS,f,"2"))
                a["width"] = 5
                a["justify"] = "right"
                a.insert("end",r[0])
                b.insert("end",r[1])
                c.insert("end",r[2])
                d.insert("end",r[3])
                e.insert("end",r[4])
                a.grid(column = 0, row = i)
                b.grid(column = 1, row = i)
                c.grid(column = 2, row = i)
                d.grid(column = 3, row = i)
                e.grid(column = 4, row = i)
        # row = []
        # for i in range(0,9):
        #     row.append((Entry(eN),Entry(eN)))
        #     a,b = row[-1]
        #     a["width"] = 5
        #     a["justify"] = "right"
        #     a.insert("end",str(i+1))
        #     a.grid(column = 2, row = i)
        #     b.grid(column = 3, row = i,columnspan=2)
        # start = Entry(eN)
        # end = Entry(eN)
        # act = Entry(eN)
        # cool = Entry(eN)
        #
        # start["width"] = 6
        # end["width"] = 6
        # act["width"] = 6
        # cool["width"] = 6
        #
        # start.grid(column=1,row=8,columnspan=2)
        # end.grid(column=3,row=8,columnspan=2)
        # act.grid(column=0,row=9)
        # cool.grid(column=0,row=10)
        #
        # ast = Entry(eN)
        # ast["width"] = 10
        # ast.grid(column=1,row=9)
        # asd = Entry(eN)
        # asd["width"] = 10
        # asd.grid(column=2,row=9)
        # aet = Entry(eN)
        # aet["width"] = 10
        # aet.grid(column=3,row=9)
        # aed = Entry(eN)
        # aed["width"] = 10
        # aed.grid(column=4,row=9)
        # cst = Entry(eN)
        # cst["width"] = 10
        # cst.grid(column=1,row=10)

    def __init__(self, master=None):
        # Scrape the data directory before building any widgets.
        self.scrapeExperiments()
        self.fileroot = "F13001SAL"
        Frame.__init__(self, master)
        self.pack()
        self.createWidgets()
# Script entry point: build the UI and run the Tk main loop.
root = Tk()
app = Application(master=root)
app.mainloop()
root.destroy()
| |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2016, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from py2neo import PRODUCT
from py2neo.database.auth import keyring
from py2neo.database.status import GraphError, Unauthorized
from py2neo.packages.httpstream import http, ClientError, ServerError, \
Resource as _Resource, ResourceTemplate as _ResourceTemplate
from py2neo.packages.httpstream.http import JSONResponse, user_agent
from py2neo.packages.httpstream.numbers import UNAUTHORIZED
from py2neo.packages.httpstream.packages.urimagic import URI
from py2neo.util import raise_from
# All HTTP payloads are encoded/decoded as UTF-8.
http.default_encoding = "UTF-8"

# Registry of extra HTTP headers, keyed by (scheme, host, port); the
# (None, None, None) entry applies to every request.
_http_headers = {
    (None, None, None): [
        ("User-Agent", user_agent(PRODUCT)),
        ("X-Stream", "true"),
    ],
}
def set_http_header(key, value, scheme=None, host=None, port=None):
    """ Add an HTTP header for all future requests. If a `host_port` is
    specified, this header will only be included in requests to that
    destination.

    :arg key: name of the HTTP header
    :arg value: value of the HTTP header
    :arg scheme:
    :arg host:
    :arg port:
    """
    # setdefault replaces the previous check-then-append dance: it creates
    # the list for a new (scheme, host, port) key and reuses it otherwise.
    _http_headers.setdefault((scheme, host, port), []).append((key, value))
def get_http_headers(scheme, host, port):
    """Fetch all HTTP headers relevant to the `host_port` provided.

    :arg scheme:
    :arg host:
    :arg port:
    """
    collected = {}
    for (s, h, p), headers in _http_headers.items():
        # A registry entry applies when each stored component is either a
        # wildcard (None) or an exact match.
        scheme_ok = s is None or s == scheme
        host_ok = h is None or h == host
        port_ok = p is None or p == port
        if scheme_ok and host_ok and port_ok:
            collected.update(headers)
    # Attach stored credentials for this host/port, if any.
    for address, auth in keyring.items():
        if auth and address.host == host and address.http_port == port:
            collected["Authorization"] = auth.http_authorization
    return collected
class Resource(_Resource):
    """ Base class for all local resources mapped to remote counterparts.
    """

    def __init__(self, uri, metadata=None, headers=None):
        """Bind this resource to ``uri``.

        :arg uri: remote URI of the resource
        :arg metadata: optional pre-fetched metadata dict
        :arg headers: extra HTTP headers to send with every request
        """
        uri = URI(uri)
        # NOTE(review): _Resource.__init__ returns None, so _resource is
        # always None here — confirm whether this assignment is intentional.
        self._resource = _Resource.__init__(self, uri)
        self._headers = dict(headers or {})
        # Cache the bound super-object for the raw HTTP verbs below.
        self.__base = super(Resource, self)
        if metadata is None:
            self.__initial_metadata = None
        else:
            self.__initial_metadata = dict(metadata)
        self.__last_get_response = None
        uri = uri.string
        # The DBMS root is everything up to the first path segment.
        dbms_uri = uri[:uri.find("/", uri.find("//") + 2)] + "/"
        if dbms_uri == uri:
            self.__dbms = self
        else:
            # Imported here to avoid a circular import with py2neo.database.
            from py2neo.database import DBMS
            self.__dbms = DBMS(dbms_uri)
        self.__ref = NotImplemented

    @property
    def graph(self):
        """ The parent graph of this resource.

        :rtype: :class:`.Graph`
        """
        return self.__dbms.graph

    @property
    def headers(self):
        """ The HTTP headers sent with this resource.
        """
        # Instance-specific headers override the globally registered ones.
        headers = get_http_headers(self.__uri__.scheme, self.__uri__.host, self.__uri__.port)
        headers.update(self._headers)
        return headers

    @property
    def metadata(self):
        """ Metadata received in the last HTTP response.
        """
        if self.__last_get_response is None:
            if self.__initial_metadata is not None:
                return self.__initial_metadata
            # Lazily fetch metadata on first access.
            self.get()
        return self.__last_get_response.content

    def resolve(self, reference, strict=True):
        """ Resolve a URI reference against the URI for this resource,
        returning a new resource represented by the new target URI.

        :arg reference: Relative URI to resolve.
        :arg strict: Strict mode flag.
        :rtype: :class:`.Resource`
        """
        return Resource(_Resource.resolve(self, reference, strict).uri)

    @property
    def dbms(self):
        """ The root service associated with this resource.

        :return: :class:`.DBMS`
        """
        return self.__dbms

    def get(self, headers=None, redirect_limit=5, **kwargs):
        """ Perform an HTTP GET to this resource.

        :arg headers: Extra headers to pass in the request.
        :arg redirect_limit: Maximum number of times to follow redirects.
        :arg kwargs: Other arguments to pass to the underlying `httpstream` method.
        :rtype: :class:`httpstream.Response`
        :raises: :class:`py2neo.GraphError`
        """
        headers = dict(self.headers, **(headers or {}))
        kwargs.update(cache=True)
        try:
            response = self.__base.get(headers=headers, redirect_limit=redirect_limit, **kwargs)
        except (ClientError, ServerError) as error:
            if error.status_code == UNAUTHORIZED:
                raise Unauthorized(self.uri.string)
            # JSON error responses carry structured details we can forward.
            if isinstance(error, JSONResponse):
                content = dict(error.content, request=error.request, response=error)
            else:
                content = {}
            message = content.pop("message", "HTTP GET returned response %s" % error.status_code)
            raise_from(GraphError(message, **content), error)
        else:
            # Remember the response so `metadata` can be served from it.
            self.__last_get_response = response
            return response

    def put(self, body=None, headers=None, **kwargs):
        """ Perform an HTTP PUT to this resource.

        :arg body: The payload of this request.
        :arg headers: Extra headers to pass in the request.
        :arg kwargs: Other arguments to pass to the underlying `httpstream` method.
        :rtype: :class:`httpstream.Response`
        :raises: :class:`py2neo.GraphError`
        """
        headers = dict(self.headers, **(headers or {}))
        try:
            response = self.__base.put(body, headers, **kwargs)
        except (ClientError, ServerError) as error:
            if error.status_code == UNAUTHORIZED:
                raise Unauthorized(self.uri.string)
            if isinstance(error, JSONResponse):
                content = dict(error.content, request=error.request, response=error)
            else:
                content = {}
            message = content.pop("message", "HTTP PUT returned response %s" % error.status_code)
            raise_from(GraphError(message, **content), error)
        else:
            return response

    def post(self, body=None, headers=None, **kwargs):
        """ Perform an HTTP POST to this resource.

        :arg body: The payload of this request.
        :arg headers: Extra headers to pass in the request.
        :arg kwargs: Other arguments to pass to the underlying `httpstream` method.
        :rtype: :class:`httpstream.Response`
        :raises: :class:`py2neo.GraphError`
        """
        headers = dict(self.headers, **(headers or {}))
        try:
            response = self.__base.post(body, headers, **kwargs)
        except (ClientError, ServerError) as error:
            if error.status_code == UNAUTHORIZED:
                raise Unauthorized(self.uri.string)
            if isinstance(error, JSONResponse):
                content = dict(error.content, request=error.request, response=error)
            else:
                content = {}
            message = content.pop("message", "HTTP POST returned response %s" % error.status_code)
            raise_from(GraphError(message, **content), error)
        else:
            return response

    def delete(self, headers=None, **kwargs):
        """ Perform an HTTP DELETE to this resource.

        :arg headers: Extra headers to pass in the request.
        :arg kwargs: Other arguments to pass to the underlying `httpstream` method.
        :rtype: :class:`httpstream.Response`
        :raises: :class:`py2neo.GraphError`
        """
        headers = dict(self.headers, **(headers or {}))
        try:
            response = self.__base.delete(headers, **kwargs)
        except (ClientError, ServerError) as error:
            if error.status_code == UNAUTHORIZED:
                raise Unauthorized(self.uri.string)
            if isinstance(error, JSONResponse):
                content = dict(error.content, request=error.request, response=error)
            else:
                content = {}
            message = content.pop("message", "HTTP DELETE returned response %s" % error.status_code)
            raise_from(GraphError(message, **content), error)
        else:
            return response
class ResourceTemplate(_ResourceTemplate):
    """ A factory class for producing :class:`.Resource` objects dynamically
    based on a template URI.
    """

    #: The class of error raised by failure responses from resources produced by this template.
    error_class = GraphError

    def expand(self, **values):
        """ Produce a resource instance by substituting values into the
        stored template URI.

        :arg values: A set of named values to plug into the template URI.
        :rtype: :class:`.Resource`
        """
        expanded_uri = self.uri_template.expand(**values)
        return Resource(expanded_uri)
| |
#!/usr/bin/env python
"""The JIP command line package contains utilities and the modules
that expose command line functions for the JIP command. The module hosts
a set of utility functions that can be used to simplify the process of
interacting with the JIP API from within a command line tool.
Functions in this module might have certain limitations when you want to use
them as general API calls. Most of the output generation functions print to
`stdout` and this can not be changed. In addition, be very careful the
:py:func:`dry`, it calles ``sys.exit(1)`` in case of a failure.
.. warning:: Both :py:func:`run` and :py:func:`dry` call ``sys.exit(1)`` in
case of a failure! Be very careful when you want to call them
outside of a command line tool that is allowed terminate!
.. note:: Please note that you can use the module to implement custom command
line utilities, but it was written to support the commands that are
shipped with JIP. That means the modules functions might change
according to the needs of the internal command line utilities.
"""
from datetime import timedelta, datetime
import os
import sys
from jip.vendor.texttable import Texttable
import jip.cluster
import jip.db
import jip.jobs
import jip.logger
import jip.profiles
# Module-level logger for the command line helpers.
log = jip.logger.getLogger('job.cli')
##############################################################################
# Color definitions
##############################################################################
# ANSI terminal escape sequences used to colorize output; NORMAL means
# "no color" and ENDC resets the terminal back to its default.
NORMAL = ''
BLACK = '\033[90m'
RED = '\033[91m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
BLUE = '\033[94m'
MAGENTA = '\033[95m'
CYAN = '\033[96m'
WHITE = '\033[97m'
ENDC = '\033[0m'

#: Maps job states to colors
STATE_COLORS = {
    jip.db.STATE_DONE: GREEN,
    jip.db.STATE_FAILED: RED,
    jip.db.STATE_HOLD: YELLOW,
    jip.db.STATE_QUEUED: NORMAL,
    jip.db.STATE_RUNNING: BLUE,
    jip.db.STATE_CANCELED: YELLOW
}

#: Maps job states to the single characters used in compact displays
STATE_CHARS = {
    jip.db.STATE_DONE: "#",
    jip.db.STATE_FAILED: "X",
    jip.db.STATE_HOLD: "H",
    jip.db.STATE_QUEUED: "#",
    jip.db.STATE_RUNNING: "*",
    jip.db.STATE_CANCELED: "C"
}
def resolve_job_range(ids):
    """Resolve ranges from a list of ids. Given list of id strings
    can contain ranges separated with '-'. For example, '1-10' will
    result in a range from 1..10.

    :param ids: string or list of strings of ids
    :type ids: string or list of strings
    :returns: resolved list of ids
    :rtype: list of integers
    :raises ValueError: if on of the ids could not be converted to a valid,
                        positive id
    """
    if not isinstance(ids, (list, tuple)):
        ids = [ids]
    r = []

    def to_id(i):
        # Convert a single token to a non-negative integer id.
        # Only conversion failures are translated into the generic message;
        # previously a bare ``except`` also swallowed the negative-id
        # ValueError below (its message could never surface) and caught
        # system-exiting exceptions.
        try:
            v = int(i)
        except (TypeError, ValueError):
            raise ValueError("Unable to convert '%s' to a job id. A valid"
                             " job id has to be a number" % i)
        if v < 0:
            raise ValueError("Job ids have to be >= 0!")
        return v

    for i in ids:
        s = i.split("-")
        if len(s) == 1:
            r.append(to_id(i))
        elif len(s) == 2:
            start = to_id(s[0])
            end = to_id(s[1])
            # Accept ranges given in either order, e.g. '10-1'.
            start, end = min(start, end), max(start, end)
            r.extend(range(start, end + 1))
        else:
            raise ValueError("Unable to guess a job range from %s" % i)
    return r
def parse_args(docstring, argv=None, options_first=True):
    """Parse the command line options.

    :param docstring: the docstring that will be parsed
    :param argv: the arguments. Defaults to sys.argv if this is not specified
    :returns: parsed options as dictionary
    """
    from jip.vendor.docopt import docopt
    if argv is None:
        argv = sys.argv[1:]
    return docopt(docstring, argv=argv, options_first=options_first)
def parse_job_ids(args, read_stdin=True):
    """Resolves job and clsuter ids specified in the args --job
    and --cluster-job options. In additon, this reads job ids from
    ``stdin``.

    :param args: parsed command line options
    :returns: tuple of job ids and cluster ids
    """
    raw_jobs = args["--job"]
    raw_cluster = args["--cluster-job"]
    # Expand any '1-10' style ranges; missing options become empty lists.
    job_ids = resolve_job_range(raw_jobs) if raw_jobs is not None else []
    cluster_ids = resolve_job_range(raw_cluster) if raw_cluster is not None else []
    # Additionally accept ids piped in on stdin.
    if read_stdin:
        job_ids += read_ids_from_pipe()
    return job_ids, cluster_ids
def show_dry(jobs, options=None, profiles=False):
    """Print the dry-run table to stdout.

    :param jobs: list of jobs
    :param options: the parent script options
    :param profiles: render job profiles table
    """
    # Pipeline-wide options are only shown for multi-job runs.
    if options and len(jobs) > 1:
        show_options(options, "Pipeline Configuration", ['help', 'dry', 'force'])
    # One options table per job.
    for job in jobs:
        show_options(job.restore_configuration(), "Job - %s" % str(job))
    # State table, then optional profile and hierarchy views.
    show_job_states(jobs)
    if profiles:
        show_job_profiles(jobs)
    if len(jobs) > 1:
        show_job_tree(jobs)
def show_commands(jobs):
    """Print the commands for the given list of jobs

    :param jobs: list of jobs
    :type jobs: list of :class:`jip.db.Job`
    """
    print ""
    print "Job commands"
    print "------------"
    # Jobs are grouped; each group is rendered as one command section.
    for g in jip.jobs.create_groups(jobs):
        job = g[0]
        # Dependencies outside of this group.
        deps = [str(d) for j in g
                for d in j.dependencies if d not in g]
        name = "|".join(str(j) for j in g)
        print "%s %s -- Interpreter: %s %s" % (
            colorize("###", YELLOW),
            colorize(name, BLUE),
            colorize(job.interpreter, GREEN),
            ("Dependencies: " + colorize(",".join(deps), BLUE)) if deps else ""
        )
        # print log files
        print "%s stdout: %s" % (
            colorize("###", YELLOW),
            colorize(job.stdout if job.stdout else "<default>", BLUE)
        )
        print "%s stderr: %s" % (
            colorize("###", YELLOW),
            colorize(job.stderr if job.stderr else "<default>", BLUE)
        )
        for i, j in enumerate(g):
            if i > 0:
                # Pipe marker between non-grouped commands.
                if not j.group_from:
                    print "|"
            print j.command
        print colorize("###", YELLOW)
def show_options(options, title=None, excludes=None, show_defaults=True):
    """Print the options to a table

    :param options: the options
    :type options: :class:`jip.options.Options`
    :param title: a title for the table
    :param excludes: list of option names that will be excluded
    :param show_defaults: if True, all options will be printed, otherwise,
                          only options that are different from their default
                          value will be included
    """
    if title is not None:
        print "#" * 87
        print "| {name:^91} |".format(name=colorize(title, BLUE))
    rows = []
    excludes = excludes if excludes is not None else ['help']
    for o in options:
        # disable any rendering
        o.render_context = None
        if (show_defaults or o.raw() != o.default) and o.name not in excludes:
            rows.append([o.name, _clean_value(o.raw())])
    print render_table(["Name", "Value"], rows, widths=[30, 50],
                       deco=Texttable.VLINES |
                            Texttable.BORDER |
                            Texttable.HEADER)
def show_job_states(jobs, title="Job states"):
    """Print the job states table for a list of jobs.

    :param jobs: list of jobs
    :type jobs: list of :class:`jip.db.Job`
    :param title: a title for the table
    """
    if title is not None:
        print "#" * 149
        print "| {name:^153} |".format(
            name=colorize(title, BLUE)
        )
    rows = []
    for g in jip.jobs.create_groups(jobs):
        job = g[0]
        name = "|".join(str(j) for j in g)
        # Collect input/output files across the whole group.
        outs = [_clean_value(f) for j in g for f in j.tool.get_output_files()]
        ins = [_clean_value(f) for j in g for f in j.tool.get_input_files()]
        for j in [jj for jj in g if jj.additional_options]:
            for a in j.additional_options:
                ins.append(_clean_value(a.raw()))
        state = colorize(job.state, STATE_COLORS[job.state])
        rows.append([name, state, ", ".join(ins), ", ".join(outs)])
    print render_table(["Name", "State", "Inputs", "Outputs"], rows,
                       widths=[30, 6, 50, 50],
                       deco=Texttable.VLINES |
                            Texttable.BORDER |
                            Texttable.HEADER)
def show_job_profiles(jobs, title="Job profiles"):
    """Print the job profile for a given list of jobs.

    The job profile contains the following properties:

    Name
        The job name
    Queue
        The queue assigned to the job
    Priority
        The jobs priority
    Threads
        Number of threads assigned to the job
    Time
        Maximum run time assigned to the job
    Memory
        Maximum memory assigned to the job
    Account
        The account assigned to the job
    Directory
        The jobs working directory

    :param jobs: list of jobs
    :type jobs: list of :class:`jip.db.Job`
    :param title: a title for the table
    """
    if title is not None:
        print "#" * 149
        print "| {name:^153} |".format(name=colorize(title, BLUE))
    rows = []
    for g in jip.jobs.create_groups(jobs):
        job = g[0]
        name = "|".join(str(j) for j in g)
        rows.append([
            name,
            job.queue,
            job.priority,
            job.threads,
            # max_time is stored in minutes; render as a timedelta.
            timedelta(seconds=job.max_time * 60) if job.max_time else None,
            job.max_memory,
            job.account,
            os.path.relpath(job.working_directory)
        ])
    print render_table([
        "Name",
        "Queue",
        "Priority",
        "Threads",
        "Time",
        "Memory",
        "Account",
        "Directory"],
        rows,
        widths=[30, 10, 10, 8, 12, 8, 10, 36],
        deco=Texttable.VLINES |
             Texttable.BORDER |
             Texttable.HEADER
    )
def show_job_tree(jobs, title="Job hierarchy"):
    """Prints the job hierarchy as a tree structure

    :param jobs: list of jobs
    :type jobs: list of :class:`jip.db.Job`
    :param title: a title for the table
    """
    if title is not None:
        print "#" * 21
        print "| {name:^25} |".format(name=colorize(title, BLUE))
        print "#" * 21
    done = set([])
    counts = {}

    def draw_node(job, levels=None, parents=None, level=0, last=False):
        # Recursively render one node; returns False if already drawn.
        if job in done:
            return False
        done.add(job)
        parents.add(job)
        ## build the separator based on the levels list and the current
        ## level
        sep = "".join([u'\u2502 ' if j > 0 else " "
                       for j in levels[:level - 1]]
                      if level > 0 else [])
        # reduce the lecel counter
        if level > 0:
            levels[level - 1] = levels[level - 1] - 1
        # build the edge and the label
        edge = "" if not level else (u'\u2514\u2500' if last
                                     else u'\u251C\u2500')
        label = "%s%s" % (edge, job)
        if level == 0 and job.pipeline:
            label += " (%s)" % colorize(job.pipeline, BLUE)
        # collect other dependencies that are node covered
        # by the tree
        other_deps = ",".join(str(j) for j in job.dependencies
                              if j not in parents)
        if len(other_deps) > 0:
            label = "%s <- %s" % (colorize(label, YELLOW), other_deps)
        # print the separator and the label
        print ("%s%s" % (sep, label)).encode('utf-8')
        # update levels used by the children
        # and do the recursive call
        num = counts[job]
        levels = levels + [num]
        i = 0
        for child in job.children:
            if draw_node(child, levels=levels,
                         parents=parents, level=level + 1,
                         last=(i == (num - 1))):
                i += 1
        return True

    def count_children(job, counts):
        # Count each job's not-yet-seen children, depth first.
        if job in counts:
            return
        counts[job] = 0
        done.add(job)
        for child in job.children:
            if child not in done:
                counts[job] = counts[job] + 1
                count_children(child, counts)

    # First pass: count children starting from the root jobs.
    for job in jobs:
        if len(job.dependencies) == 0:
            count_children(job, counts)
    # Second pass: draw the tree from the roots.
    done = set([])
    for job in jobs:
        if len(job.dependencies) == 0:
            draw_node(job, levels=[], parents=set([]), level=0)
    print "#" * 21
    print "| Tasks: {j:>18} |".format(j=colorize(len(jobs), BLUE))
    print "| Jobs: {g:>19} |".format(
        g=colorize(len(jip.jobs.create_groups(jobs)), BLUE)
    )
    print "| Named Groups: {g:>11} |".format(
        g=colorize(len(set(map(lambda x: x.pipeline, jobs))), BLUE)
    )
    print "| Job Groups: {g:>13} |".format(
        g=colorize(len(filter(lambda x: len(x.dependencies) == 0, jobs)), BLUE)
    )
    print "#" * 21
def _clean_value(v):
    """Prepare a value for table display: file streams become
    "<<STREAM>>" and paths under the current directory become relative.
    Lists/tuples are cleaned element-wise.
    """
    cwd = os.getcwd()
    # make the printed option relative to cwd
    # to avoid extreme long paths
    def __cl(s):
        if isinstance(s, basestring) and len(s) > 0 and s.startswith(cwd):
            return os.path.relpath(s)
        return s
    if isinstance(v, (list, tuple)):
        v = [__cl(x) if not isinstance(x, file) else "<<STREAM>>"
             for x in v]
    else:
        v = __cl(v) if not isinstance(v, file) else "<<STREAM>>"
    return v
def colorize(string, color):
    """Colorize a string using ANSI colors.

    The `jip.cli` module contains a few ANSI color definitions that
    are used quiet often in the system.

    :param string: the string to colorize
    :param color: the color that should be used
    """
    # NORMAL means "no color": return the text untouched.
    if color == NORMAL:
        return string
    # Wrap the text in the color code and the reset sequence.
    return "".join([color, string, ENDC])
def table_to_string(value, empty=""):
    """Translate the given value to a string that can be rendered in a
    table.

    Deals primarily with ``datetime.datetime`` and ``datetime.timedelta``
    values; all other types fall back to their default string
    representation.

    :param value: the value
    :param empty: the replacement used for ``None`` values
    :returns: table compatible string representation
    :rtype: string
    """
    if value is None:
        return empty
    if isinstance(value, timedelta):
        # drop sub-second resolution before rendering
        return str(timedelta(days=value.days, seconds=value.seconds))
    if isinstance(value, datetime):
        return value.strftime('%H:%M %d/%m/%y')
    return str(value)
def create_table(header, rows, empty="", to_string=table_to_string,
                 widths=None, deco=Texttable.HEADER):
    """Create a table.

    :param header: list of table column names
    :param rows: list of list of row values
    :param empty: string representation for ``None`` values
    :param to_string: function reference to the converter function that
                      creates string representation for row values
    :param widths: optional list of columns widths
    :param deco: Texttable decorations
    :returns: Texttable table instance
    """
    t = Texttable(0)
    t.set_deco(deco)
    if header is not None:
        t.header(header)
    if widths is not None:
        t.set_cols_width(widths)
    # Use an explicit loop rather than map() for its side effect: under
    # Python 3 map() is lazy and the rows would silently never be added.
    for row in rows:
        t.add_row([to_string(x, empty=empty) for x in row])
    return t
def render_table(header, rows, empty="", widths=None,
                 to_string=table_to_string, deco=Texttable.HEADER):
    """Create a simple ASCII table and return its string representation.

    :param header: list of table column names
    :param rows: list of list of row values
    :param empty: string representation for ``None`` values
    :param to_string: function reference to the converter function that
                      creates string representation for row values
    :param widths: optional list of columns widths
    :returns: string representation of the table
    """
    table = create_table(header, rows, empty=empty, widths=widths,
                         to_string=to_string, deco=deco)
    return table.draw()
def confirm(msg, default=True):
    """Print the message and ask the user to confirm. Return True
    if the user confirmed with Y.

    :param msg: the message
    :param default: Default answer (``None`` forces an explicit reply)
    """
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    if default is None:
        prompt = "[y/n]"
    elif default:
        prompt = "[Y/n]"
    else:
        prompt = "[y/N]"
    question = "%s %s:" % (msg, prompt)
    sys.stdout.write(question)
    while True:
        # normalize the answer so "Y", "Yes" or " y " are accepted too;
        # previously only exact lower-case replies matched.
        choice = raw_input().strip().lower()
        if default is not None and choice == '':
            return default
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write("\nPlease respond with 'yes' or 'no' "
                             "(or 'y' or 'n').\n\n")
            sys.stdout.write(question)
def read_ids_from_pipe():
    """Read job ids from a stream"""
    import sys
    if sys.stdin.isatty():
        # nothing is piped in, return an empty list
        return []
    # first tab-separated column of each piped line is a job id
    job_ids = [line.strip().split("\t")[0] for line in sys.stdin]
    # reopen stdin
    sys.stdin = open('/dev/tty', 'r')
    return job_ids
def dry(script, script_args, dry=True, show=False):
    """Load the script and initialize it with the given arguments, then
    perform a dry run and print the options and commands.

    .. warning:: This method calls ``sys.exit(1)`` in case an Exception
                 is raised during output-file/queue validation.

    :param script: the script
    :param script_args: script arguments
    :param dry: print job options
    :param show: print job commands
    """
    # we handle --dry and --show separately,
    # create the jobs and call the show commands
    jobs = jip.jobs.create_jobs(script, args=script_args)
    if dry:
        # options are only available when a single Tool is rendered
        show_dry(jobs, options=script.options
                 if isinstance(script, jip.tools.Tool) else None)
    if show:
        show_commands(jobs)
    try:
        # validation: fail early on clashing outputs / already queued jobs
        jip.jobs.check_output_files(jobs)
        jip.jobs.check_queued_jobs(jobs)
    except Exception as err:
        # Python 2 stream-redirect print syntax
        print >>sys.stderr, "%s\n" % (colorize("Validation error!", RED))
        print >>sys.stderr, str(err)
        sys.exit(1)
| |
"""
Django settings for decisions project.
Generated by 'django-admin startproject' using Django 1.9.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
# The __future__ import must follow the docstring: when it came first, the
# string above was a plain expression statement and the module had no
# __doc__ at all.
from __future__ import absolute_import, unicode_literals
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded and committed; it should come from an
# environment variable before any production deployment — confirm.
SECRET_KEY = 'pjqe_zw4qxc52*m+=_4(2w))m!5d6ur%=e@dl7w%#g4$u7pf!e'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty list is fine while DEBUG is True; must be filled for production.
ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    # second party apps
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.postgres',
    'django.contrib.sites',
    'django.contrib.gis',
    # third party apps
    'haystack',
    'tagging',
    'sitemetrics',
    # first party apps
    'decisions.ahjo',
    'decisions.subscriptions',
    'decisions.comments',
    'decisions.news',
    'decisions.geo',
]
# Pre-Django-1.10 middleware setting (MIDDLEWARE replaced it later).
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'decisions.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, "decisions", "templates")],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'decisions.subscriptions.context_processors.metrics',
            ],
        },
    },
]

WSGI_APPLICATION = 'decisions.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# PostGIS backend (GeoDjango); overridden below when DATABASE_URL is set.
DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.postgis',
        'NAME': 'helsinkidecisions',
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Log users in by email address instead of username.
AUTHENTICATION_BACKENDS = [
    'decisions.subscriptions.backends.EmailModelBackend'
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/

# Dummy gettext marker: lets language names below be picked up by the
# translation tooling without importing Django's translation machinery
# at settings-import time.
_ = lambda s: s

LANGUAGE_CODE = 'fi'
LANGUAGES = (
    ('fi', _('Finnish')),
)
TIME_ZONE = 'Europe/Helsinki'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = [
    os.path.join(BASE_DIR, "decisions", "locale")
]

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "decisions", "static"),
]

# Full-text search: Whoosh index stored next to the project.
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
        'PATH': os.path.join(BASE_DIR, 'whoosh.idx'),
    },
}

# Development email defaults (console backend); overridden below when the
# Mailgun environment variables are present.
DEFAULT_FROM_EMAIL = "noreply@example.com"
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

SITE_URL = os.environ.get("SITE_URL", "http://localhost:8000")
SITE_NAME = "Decisions"
LOGIN_URL = "/account/login/"

# Celery broker (Redis DB 10); cache uses DB 11 below.
BROKER_URL = 'redis://localhost:6379/10'
CACHES = {
    'default': {
        'BACKEND': 'redis_cache.RedisCache',
        'LOCATION': [
            'localhost:6379'
        ],
        'OPTIONS': {
            'DB': 11,
            'PARSER_CLASS': 'redis.connection.HiredisParser',
            'CONNECTION_POOL_CLASS': 'redis.BlockingConnectionPool',
            'CONNECTION_POOL_CLASS_KWARGS': {
                'max_connections': 50,
                'timeout': 20,
            },
            'MAX_CONNECTIONS': 1000,
            'PICKLE_VERSION': -1,
        },
    },
}
# Set up logging
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'filters': {
        'require_debug_true': {
            '()': 'django.utils.log.RequireDebugTrue',
        },
    },
    'handlers': {
        # swallows records entirely (used for noisy DisallowedHost below)
        'null': {
            'level':'DEBUG',
            'class':'logging.NullHandler',
        },
        # console output only while DEBUG is on
        'console': {
            'level': 'INFO',
            'filters': ['require_debug_true'],
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler',
            'filters': []
        }
    },
    'loggers': {
        'django': {
            'handlers': ['console'],
            'propagate': True,
        },
        # request errors go to the site admins by email
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': False,
        },
        'django.security.DisallowedHost': {
            'handlers': ['null'],
            'propagate': False,
        },
    }
}
# Celery
from celery.schedules import crontab

CELERY_TIMEZONE = "Europe/Helsinki"
CELERYBEAT_SCHEDULE = {
    # Update the database and index frequently
    # (crontab(minute=5) fires once per hour, at :05)
    "fetch-index": {
        'task': 'decisions.ahjo.tasks.fetch_index',
        'schedule': crontab(minute=5),
    },
    # Update geoindex about as frequently as new data is fetched
    "geoindex": {
        'task': 'decisions.geo.tasks.update_geoindex',
        'schedule': crontab(minute=10),
    },
    # Send email reasonably infrequently (once a day at 11:45)
    "process": {
        'task': 'decisions.ahjo.tasks.process',
        'schedule': crontab(hour=11, minute=45)
    }
}
# Restrict task payloads to JSON (avoids pickle deserialization of
# broker messages).
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = "json"

# Tagging
FORCE_LOWERCASE_TAGS = True
if "DATABASE_URL" in os.environ:
    # Heroku database integration: DATABASE_URL overrides the local
    # PostGIS defaults configured above.
    import dj_database_url
    db_from_env = dj_database_url.config(conn_max_age=60)
    DATABASES['default'].update(db_from_env)

if "MAILGUN_SMTP_SERVER" in os.environ:
    # Heroku Mailgun integration
    DEFAULT_FROM_EMAIL = "noreply@cryptic-earth-25359.herokuapp.com"
    EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
    EMAIL_HOST = os.environ["MAILGUN_SMTP_SERVER"]
    # Django expects an integer port.
    EMAIL_PORT = int(os.environ["MAILGUN_SMTP_PORT"])
    # Django's SMTP backend reads EMAIL_HOST_USER / EMAIL_HOST_PASSWORD;
    # the old EMAIL_USER / EMAIL_PASSWORD names were silently ignored.
    # The old names are kept as aliases for backward compatibility.
    EMAIL_HOST_USER = EMAIL_USER = os.environ["MAILGUN_SMTP_USER"]
    EMAIL_HOST_PASSWORD = EMAIL_PASSWORD = os.environ["MAILGUN_SMTP_PASSWORD"]

SITE_ID = 1
| |
# Read the Intcode program: one comma-separated line of integers.
with open("input.txt") as f:
    data = f.read().strip()
intcode = [int(a) for a in data.split(",")]

from collections import defaultdict
from random import choice  # NOTE(review): appears unused in this chunk
def run_yield(nums, get_input):
    """Run the Intcode program ``nums`` as a generator.

    ``get_input`` is called with no arguments whenever the program
    executes an input instruction (opcode 3).  Each value produced by an
    output instruction (opcode 4) is yielded as soon as it is emitted;
    the complete output list is also returned (available to callers as
    ``StopIteration.value`` when the generator is driven manually).
    """
    # Instruction table. 'in'/'out' give the number of read/write
    # parameters, 'op' computes the result. Flags mark special handling:
    # 'yield' = emit the output value, 'jump' = result (if not None)
    # becomes the new program counter, 'rel' = result adjusts the
    # relative base, 'exit' = halt.
    ops = {
        1: {
            'in': 2,
            'out': 1,
            'op': lambda a, b: a+b,
        },
        2: {
            'in': 2,
            'out': 1,
            'op': lambda a, b: a*b,
        },
        3: {
            'in': 0,
            'out': 1,
            'op': lambda: get_input(),
        },
        4: {
            'in': 1,
            'out': 0,
            'op': lambda a: OUTPUT.append(a),
            'yield': True,
        },
        5: {
            'in': 2,
            'out': 0,
            'op': lambda a, b: b if a != 0 else None,
            'jump': True,
        },
        6: {
            'in': 2,
            'out': 0,
            'op': lambda a, b: b if a == 0 else None,
            'jump': True,
        },
        7: {
            'in': 2,
            'out': 1,
            'op': lambda a,b: 1 if a < b else 0,
        },
        8: {
            'in': 2,
            'out': 1,
            'op': lambda a,b: 1 if a == b else 0,
        },
        9: {
            'in': 1,
            'out': 0,
            'op': lambda a: a,
            'rel': True,
        },
        99: {
            'in': 0,
            'out': 0,
            'op': lambda: None,
            'exit': True,
        },
    }
    pc = 0      # program counter
    rel = 0     # relative base for mode-2 addressing
    OUTPUT = []
    # Unlimited memory: unwritten addresses read as 0.
    nums = defaultdict(lambda: 0, enumerate(nums))
    while True:
        # Low two decimal digits are the opcode; remaining digits are the
        # per-parameter addressing modes, least-significant first.
        opcode = nums[pc] % 100
        val = int(nums[pc] / 100)
        i, j, k = int(val/100), int(val/10)%10, val%10
        if opcode in ops:
            mode = [k, j, i]
            mode_index = 0
            inputs = []
            # Resolve the read parameters according to their modes:
            # 0 = position, 1 = immediate, 2 = relative.
            for input_n in range(1, ops[opcode]['in']+1):
                a = nums[pc+input_n]
                if mode[mode_index] == 0:
                    a = nums[a]
                elif mode[mode_index] == 1:
                    pass
                elif mode[mode_index] == 2:
                    a = nums[a + rel]
                mode_index += 1
                inputs.append(a)
            out = ops[opcode]['op'](*inputs)
            # Write parameter (if any) follows the inputs; it uses the
            # next mode digit, but is never "immediate".
            if ops[opcode]['out'] == 1:
                a = nums[pc+1+ops[opcode]['in']]
                if mode[mode_index] == 0:
                    nums[a] = out
                elif mode[mode_index] == 1:
                    pass
                elif mode[mode_index] == 2:
                    nums[a + rel] = out
                mode_index += 1
            # Flag handling order matters: yield before possibly halting,
            # and jump handling decides how pc advances.
            if ops[opcode].get('yield', False):
                yield OUTPUT[-1]
            if ops[opcode].get('rel', False):
                rel += out
            if ops[opcode].get('exit', False):
                break
            if ops[opcode].get('jump', False) and out is not None:
                pc = out
            else:
                pc += 1 + ops[opcode]['in'] + ops[opcode]['out']
        else:
            raise Exception('opcode={}'.format(opcode))
    return OUTPUT
# END OF INTCODE INTERPRETER
def print_area(area):
    """Render the sparse ``{(y, x): char}`` map to stdout.

    Prints the bounding-box extents first, then one row per y value;
    cells missing from the map are drawn as spaces.
    """
    coords = list(area.keys())
    min_y = min(y for y, _ in coords)
    max_y = max(y for y, _ in coords)
    min_x = min(x for _, x in coords)
    max_x = max(x for _, x in coords)
    rows = []
    for y in range(min_y, max_y+1):
        rows.append("".join(area.get((y, x), " ")
                            for x in range(min_x, max_x+1)))
    print("Y:[{}, {}] X:[{},{}]".format(min_y, max_y, min_x, max_x))
    print("\n".join(rows))
def dir_to_pos(dir):
    """Translate a droid movement command (1-4) into a (dy, dx) step."""
    deltas = {1: (-1, 0), 2: (1, 0), 3: (0, 1), 4: (0, -1)}
    return deltas[dir]
def pos_sum(x, y):
    """Return the component-wise sum of two (y, x) coordinate pairs."""
    dy, dx = y[0], y[1]
    return x[0] + dy, x[1] + dx
def bfs(area, goal, pos):
    """Breadth-first search over the open cells of ``area``.

    :param area: sparse ``{(y, x): char}`` map; cells missing from the
        map or equal to ``"#"`` are treated as walls
    :param goal: target (y, x) coordinate
    :param pos: start (y, x) coordinate
    :returns: length of the shortest path from ``pos`` to ``goal``, or
        ``None`` when the goal is unreachable
    """
    from collections import deque
    # deque gives O(1) pops from the front; the previous list slicing
    # (l = l[1:]) made every dequeue O(n), i.e. the search quadratic.
    # The neighbour deltas are inlined (same order as movement commands
    # 1..4) instead of rebuilding dir_to_pos's dict four times per node.
    deltas = ((-1, 0), (1, 0), (0, 1), (0, -1))
    visited = {pos: 0}
    queue = deque([pos])
    while queue:
        p = queue.popleft()
        for dy, dx in deltas:
            nxt = (p[0] + dy, p[1] + dx)
            if nxt in visited:
                continue
            if area.get(nxt, "#") != "#":
                queue.append(nxt)
                visited[nxt] = visited[p] + 1
                if nxt == goal:
                    return visited[nxt]
def bfs_full(area, pos):
    """Flood-fill BFS from ``pos`` over the open cells of ``area``.

    :param area: sparse ``{(y, x): char}`` map; missing or ``"#"`` cells
        are walls
    :param pos: start (y, x) coordinate
    :returns: the largest shortest-path distance to any reachable open
        cell (0 when nothing is reachable)
    """
    from collections import deque
    # Same fixes as in bfs(): O(1) queue pops instead of quadratic list
    # slicing, and inlined neighbour deltas instead of per-call dicts.
    deltas = ((-1, 0), (1, 0), (0, 1), (0, -1))
    visited = {pos: 0}
    queue = deque([pos])
    while queue:
        p = queue.popleft()
        for dy, dx in deltas:
            nxt = (p[0] + dy, p[1] + dx)
            if nxt not in visited and area.get(nxt, "#") != "#":
                queue.append(nxt)
                visited[nxt] = visited[p] + 1
    return max(visited.values())
def return_dir(dir):
    """Return the movement command that undoes ``dir`` (None maps to None)."""
    if dir is None:
        return None
    opposites = {1: 2, 2: 1, 3: 4, 4: 3}
    return opposites[dir]
def solve(intcode):
    """Drive the repair droid to map the whole area, then compute both
    puzzle answers.

    Returns ``(p1, p2)`` where ``p1`` is the shortest distance from the
    origin to the oxygen system and ``p2`` the maximum BFS distance from
    the oxygen system (i.e. fill time).
    """
    area = {}
    area[(0,0)] = "D"
    pos = 0, 0 # y, x
    opos = None          # position of the oxygen system, once found
    # single-element list used as a mutable cell so the nested closure
    # can rebind the value (pre-`nonlocal` style)
    last_input = [None]
    dfs_state = {}       # per-cell DFS bookkeeping: backtrack dir + unvisited dirs
    def input_func():
        # Called by the interpreter whenever the droid needs a movement
        # command: iterative DFS — try unvisited neighbours first, then
        # backtrack the way we came in.
        if pos not in dfs_state:
            unvisited = []
            for i in (1,2,3,4):
                y,x = pos_sum(pos, dir_to_pos(i))
                if area.get((y,x), " ") == " ":
                    unvisited.append(i)
            dfs_state[pos] = {
                'backtrack': return_dir(last_input[0]),
                'unvisited': unvisited,
                'unvisited_index': 0,
            }
        state = dfs_state[pos]
        if state['unvisited_index'] == len(state['unvisited']):
            last_input[0] = state['backtrack']
        else:
            last_input[0] = state['unvisited'][state['unvisited_index']]
            state['unvisited_index'] += 1
        # Special case: we have mapped the whole area, just return something valid
        if last_input[0] is None:
            return 1
        return last_input[0]
    for t, reply in enumerate(run_yield(intcode, input_func)):
        # Quit when all of the area has been mapped
        if last_input[0] is None:
            break
        if reply == 0:
            # droid hit a wall; it did not move
            c = pos_sum(pos, dir_to_pos(last_input[0]))
            area[c] = "#"
        elif reply == 1:
            # droid moved one step
            area[pos] = "."
            pos = pos_sum(pos, dir_to_pos(last_input[0]))
            area[pos] = "D"
        elif reply == 2:
            # droid moved and found the oxygen system
            area[pos] = "."
            pos = pos_sum(pos, dir_to_pos(last_input[0]))
            area[pos] = "D"
            opos = pos
        else:
            raise Exception("Unknown reply {}".format(reply))
    # NOTE(review): the exploration above is a DFS despite the message text.
    print("Final map after BFS of {} iterations".format(t))
    area[(0,0)] = "S"
    area[opos] = "O"
    print_area(area)
    print()
    goal = opos
    pos = 0, 0
    print("Solve BFS from {} to {}".format(pos, goal))
    p1 = bfs(area, goal, pos)
    # part 2: oxygen spreads outward from the oxygen system position
    p2 = bfs_full(area, goal)
    return p1, p2
# Entry point: explore the maze and print the (part 1, part 2) answers.
print(solve(intcode))
| |
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for monitored_session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import glob
import os
import threading
import time
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import debug_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import coordinator
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
class ScaffoldTest(test.TestCase):
  """Scaffold tests: lazy construction, finalize() defaults, caching in
  graph collections, explicit overrides and copy_from_scaffold."""

  def test_nothing_created_before_finalize(self):
    # A fresh Scaffold is fully lazy: every op/property is None until
    # finalize() is called.
    with ops.Graph().as_default():
      scaffold = monitored_session.Scaffold()
      self.assertEqual(None, scaffold.init_op)
      self.assertEqual(None, scaffold.init_feed_dict)
      self.assertEqual(None, scaffold.init_fn)
      self.assertEqual(None, scaffold.ready_op)
      self.assertEqual(None, scaffold.ready_for_local_init_op)
      self.assertEqual(None, scaffold.local_init_op)
      self.assertEqual(None, scaffold.saver)

  def test_defaults_empty_graph(self):
    # finalize() fills in default ops/saver; ready ops report the names
    # of not-yet-initialized variables until init ops have run.
    with ops.Graph().as_default():
      scaffold = monitored_session.Scaffold()
      variables.Variable(1, name='my_var')
      variables.Variable(
          2, name='my_local_var', collections=[ops.GraphKeys.LOCAL_VARIABLES])
      scaffold.finalize()
      self.assertTrue(isinstance(scaffold.init_op, ops.Operation))
      self.assertEqual(None, scaffold.init_feed_dict)
      self.assertEqual(None, scaffold.init_fn)
      self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))
      self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))
      with self.test_session() as sess:
        self.assertItemsEqual([b'my_var', b'my_local_var'],
                              sess.run(scaffold.ready_op))
        self.assertItemsEqual([b'my_var'],
                              sess.run(scaffold.ready_for_local_init_op))
        sess.run(scaffold.init_op)
        self.assertEqual(0, len(sess.run(scaffold.ready_for_local_init_op)))
        sess.run(scaffold.local_init_op)
        self.assertEqual(0, len(sess.run(scaffold.ready_op)))

  def test_defaults_no_variables(self):
    # Defaults are still created for a graph without any variables.
    with ops.Graph().as_default():
      scaffold = monitored_session.Scaffold()
      constant_op.constant(1, name='my_const')
      scaffold.finalize()
      self.assertTrue(isinstance(scaffold.init_op, ops.Operation))
      self.assertEqual(None, scaffold.init_feed_dict)
      self.assertEqual(None, scaffold.init_fn)
      self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))
      self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))

  def test_caches_values(self):
    # Two scaffolds finalized on the same graph share the cached default
    # ops/saver (stored in graph collections).
    with ops.Graph().as_default():
      variables.Variable([1])
      scaffold1 = monitored_session.Scaffold()
      scaffold1.finalize()
      scaffold2 = monitored_session.Scaffold()
      scaffold2.finalize()
      self.assertEqual(scaffold1.init_op, scaffold2.init_op)
      self.assertEqual(scaffold1.ready_op, scaffold2.ready_op)
      self.assertEqual(scaffold1.ready_for_local_init_op,
                       scaffold2.ready_for_local_init_op)
      self.assertEqual(scaffold1.local_init_op, scaffold2.local_init_op)
      self.assertEqual(scaffold1.saver, scaffold2.saver)

  def test_raise_error_if_more_than_one_cached_item(self):
    # An ambiguous SAVERS collection (two savers) must be rejected.
    with ops.Graph().as_default():
      variables.Variable([1])
      ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
      ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
      with self.assertRaisesRegexp(RuntimeError, 'More than one item'):
        monitored_session.Scaffold().finalize()

  def test_uses_passed_values(self):
    # Explicitly supplied ops/values win over the generated defaults.
    with ops.Graph().as_default():
      variables.Variable([1])
      saver = saver_lib.Saver()
      scaffold = monitored_session.Scaffold(
          init_op=2,
          init_feed_dict=3,
          init_fn=lambda scaffold, sess: 4,
          ready_op=5,
          ready_for_local_init_op=6,
          local_init_op=7,
          saver=saver)
      scaffold.finalize()
      self.assertEqual(2, scaffold.init_op)
      self.assertEqual(3, scaffold.init_feed_dict)
      self.assertTrue(callable(scaffold.init_fn))
      self.assertEqual(5, scaffold.ready_op)
      self.assertEqual(6, scaffold.ready_for_local_init_op)
      self.assertEqual(7, scaffold.local_init_op)
      self.assertEqual(saver, scaffold.saver)

  def test_graph_is_finalized(self):
    # finalize() freezes the graph: further op creation must fail.
    with ops.Graph().as_default():
      variables.Variable([1])
      monitored_session.Scaffold().finalize()
      with self.assertRaisesRegexp(RuntimeError,
                                   'Graph is finalized and cannot be modified'):
        constant_op.constant([0])

  def test_new_scaffold_from_default_scaffold(self):
    # copy_from_scaffold from a default scaffold: explicit values still win.
    scaffold1 = monitored_session.Scaffold()
    with ops.Graph().as_default():
      variables.Variable([1])
      saver = saver_lib.Saver()
      scaffold2 = monitored_session.Scaffold(
          init_op=2,
          init_feed_dict=3,
          init_fn=lambda scaffold, sess: 4,
          ready_op=5,
          ready_for_local_init_op=6,
          local_init_op=7,
          saver=saver,
          copy_from_scaffold=scaffold1)
      scaffold2.finalize()
      self.assertEqual(2, scaffold2.init_op)
      self.assertEqual(3, scaffold2.init_feed_dict)
      self.assertTrue(callable(scaffold2.init_fn))
      self.assertEqual(5, scaffold2.ready_op)
      self.assertEqual(6, scaffold2.ready_for_local_init_op)
      self.assertEqual(7, scaffold2.local_init_op)
      self.assertEqual(saver, scaffold2.saver)

  def test_new_scaffold_from_existing_scaffold(self):
    # Values given to the new scaffold override those copied from the
    # existing one.
    with ops.Graph().as_default():
      variables.Variable([1])
      saver = saver_lib.Saver()
      scaffold1 = monitored_session.Scaffold(
          init_op=2,
          init_feed_dict=3,
          init_fn=lambda scaffold, sess: 4,
          ready_op=5,
          ready_for_local_init_op=6,
          local_init_op=7,
          saver=saver)
      scaffold2 = monitored_session.Scaffold(
          init_op=4,
          init_feed_dict=6,
          init_fn=lambda scaffold, sess: 8,
          ready_op=10,
          ready_for_local_init_op=12,
          local_init_op=14,
          saver=saver,
          copy_from_scaffold=scaffold1)
      scaffold2.finalize()
      self.assertEqual(4, scaffold2.init_op)
      self.assertEqual(6, scaffold2.init_feed_dict)
      self.assertTrue(callable(scaffold2.init_fn))
      self.assertEqual(10, scaffold2.ready_op)
      self.assertEqual(12, scaffold2.ready_for_local_init_op)
      self.assertEqual(14, scaffold2.local_init_op)
      self.assertEqual(saver, scaffold2.saver)

  def test_copy_from_scaffold_is_scaffold(self):
    # copy_from_scaffold must be a Scaffold instance.
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(
          TypeError, 'copy_from_scaffold is not a Scaffold instance'):
        monitored_session.Scaffold(copy_from_scaffold=1)
def _test_dir(temp_dir, test_name):
"""Create an empty dir to use for tests.
Args:
temp_dir: Tmp directory path.
test_name: Name of the test.
Returns:
Absolute path to the test directory.
"""
test_dir = os.path.join(temp_dir, test_name)
if os.path.isdir(test_dir):
for f in glob.glob('%s/*' % test_dir):
os.remove(f)
else:
os.makedirs(test_dir)
return test_dir
class FakeHook(session_run_hook.SessionRunHook):
  """Session hook that records every callback it receives.

  Each lifecycle call is tallied in ``call_counter``; ``request`` is
  returned from before_run, and setting ``should_stop`` makes after_run
  request a stop on its run context.
  """

  def __init__(self):
    self.should_stop = False          # when True, after_run requests a stop
    self.request = None               # value to hand back from before_run
    self.call_counter = collections.Counter()
    self.last_run_context = None      # most recent before_run context
    self.last_run_values = None       # most recent after_run values

  def begin(self):
    self.call_counter['begin'] += 1

  def after_create_session(self, session, coord):  # pylint: disable=unused-argument
    self.call_counter['after_create_session'] += 1

  def before_run(self, run_context):
    self.call_counter['before_run'] += 1
    self.last_run_context = run_context
    return self.request

  def after_run(self, run_context, run_values):
    self.call_counter['after_run'] += 1
    self.last_run_values = run_values
    if self.should_stop:
      run_context.request_stop()

  def end(self, session):
    self.call_counter['end'] += 1
class MonitoredTrainingSessionTest(test.TestCase):
  """Tests MonitoredTrainingSession: checkpointing, summary writing and
  custom chief-only hooks."""

  def test_saving_restoring_checkpoint(self):
    # A chief session checkpoints the global step; a fresh session on the
    # same directory restores it.
    logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
      # A restart will find the checkpoint and recover automatically.
      with monitored_session.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(2, session.run(gstep))

  def test_summaries_steps(self):
    # Step-based summary saving: after 101 steps with save_summaries_steps
    # of 100 both the custom tag and global_step/sec are present.
    logdir = _test_dir(self.get_temp_dir(), 'test_summaries_steps')
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      summary.scalar('my_summary_tag', new_gstep * 2)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          save_summaries_steps=100,
          log_step_count_steps=10) as session:
        for _ in range(101):
          session.run(new_gstep)
      summaries = util_test.latest_summaries(logdir)
      tags = [s.summary.value[0].tag for s in summaries]
      self.assertIn('my_summary_tag', tags)
      self.assertIn('global_step/sec', tags)

  def test_summaries_secs(self):
    # Time-based summary saving: sleep past save_summaries_secs so the
    # second batch of steps triggers a save.
    logdir = _test_dir(self.get_temp_dir(), 'test_summaries_secs')
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      summary.scalar('my_summary_tag', new_gstep * 2)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          save_summaries_steps=None,
          save_summaries_secs=0.1,
          log_step_count_steps=10) as session:
        session.run(new_gstep)
        time.sleep(0.2)
        for _ in range(101):
          session.run(new_gstep)
      summaries = util_test.latest_summaries(logdir)
      tags = [s.summary.value[0].tag for s in summaries]
      self.assertIn('my_summary_tag', tags)
      self.assertIn('global_step/sec', tags)

  def test_custom_saving(self):
    # save_checkpoint_secs=0 disables automatic checkpointing; the custom
    # chief-only hook must still be invoked.
    logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
    fake_hook = FakeHook()
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          chief_only_hooks=[fake_hook],
          save_checkpoint_secs=0) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
      # Check whether custom hook called or not
      self.assertEqual(1, fake_hook.call_counter['begin'])
      # A restart will not find the checkpoint, since we didn't save.
      with monitored_session.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(0, session.run(gstep))
class StopAtNSession(monitored_session._WrappedSession):
  """A wrapped session that stops at the N-th call to _check_stop."""

  def __init__(self, sess, n):
    super(StopAtNSession, self).__init__(sess)
    self._count = n   # remaining _check_stop calls before reporting stop

  def _check_stop(self):
    # Returns False for the first n calls, then True forever after.
    if self._count == 0:
      return True
    self._count -= 1
    return False
class WrappedSessionTest(test.TestCase):
  """_WrappedSession tests: property forwarding, stop signalling and run
  delegation."""

  def test_properties(self):
    # graph and sess_str are forwarded from the wrapped session.
    with self.test_session() as sess:
      constant_op.constant(0.0)
      wrapped_sess = monitored_session._WrappedSession(sess)
      self.assertEquals(sess.graph, wrapped_sess.graph)
      self.assertEquals(sess.sess_str, wrapped_sess.sess_str)

  def test_should_stop_on_close(self):
    with self.test_session() as sess:
      wrapped_sess = monitored_session._WrappedSession(sess)
      self.assertFalse(wrapped_sess.should_stop())
      wrapped_sess.close()
      self.assertTrue(wrapped_sess.should_stop())

  def test_should_stop_uses_check_stop(self):
    # should_stop() consults the subclass _check_stop hook.
    with self.test_session() as sess:
      wrapped_sess = StopAtNSession(sess, 3)
      self.assertFalse(wrapped_sess.should_stop())
      self.assertFalse(wrapped_sess.should_stop())
      self.assertFalse(wrapped_sess.should_stop())
      self.assertTrue(wrapped_sess.should_stop())

  def test_should_stop_delegates_to_wrapped_session(self):
    # A wrapper around a wrapper delegates the stop check inward.
    with self.test_session() as sess:
      wrapped_sess0 = StopAtNSession(sess, 4)
      wrapped_sess1 = monitored_session._WrappedSession(wrapped_sess0)
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertTrue(wrapped_sess1.should_stop())

  def test_close_twice(self):
    # Closing twice is harmless and keeps reporting should_stop().
    with self.test_session() as sess:
      wrapped_sess = monitored_session._WrappedSession(sess)
      wrapped_sess.close()
      self.assertTrue(wrapped_sess.should_stop())
      wrapped_sess.close()
      self.assertTrue(wrapped_sess.should_stop())

  def test_run(self):
    # run() is forwarded, feeds included.
    with self.test_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      self.assertEqual(42, sess.run(v, feed_dict={c: 42}))
      wrapped_sess = monitored_session._WrappedSession(sess)
      self.assertEqual(51, wrapped_sess.run(v, feed_dict={c: 51}))
def busy_wait_for_coord_stop(coord):
  """Block (politely, with short sleeps) until `coord` requests a stop."""
  while True:
    if coord.should_stop():
      break
    time.sleep(0.001)
class CoordinatedSessionTest(test.TestCase):
  """_CoordinatedSession tests: coordinator-driven stopping and thread
  shutdown on close."""

  def test_properties(self):
    # graph and sess_str are forwarded from the wrapped session.
    with self.test_session() as sess:
      constant_op.constant(0.0)
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertEquals(sess.graph, coord_sess.graph)
      self.assertEquals(sess.sess_str, coord_sess.sess_str)

  def test_run(self):
    with self.test_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertEqual(42, coord_sess.run(v, feed_dict={c: 42}))

  def test_should_stop_on_close(self):
    with self.test_session() as sess:
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      coord_sess.close()
      self.assertTrue(coord_sess.should_stop())

  def test_should_stop_on_coord_stop(self):
    # Stopping the coordinator alone is enough to stop the session.
    with self.test_session() as sess:
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      coord.request_stop()
      self.assertTrue(coord_sess.should_stop())

  def test_dont_request_stop_on_exception_in_main_thread(self):
    # An exception raised on the main thread propagates to the caller
    # without tripping the coordinator's stop flag.
    with self.test_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      self.assertEqual(0, coord_sess.run(c))
      self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
      with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
        coord_sess.run([None], feed_dict={c: 2})
      self.assertFalse(coord.should_stop())
      self.assertFalse(coord_sess.should_stop())

  def test_stop_threads_on_close_after_exception(self):
    # Even after a failed run, close() stops and joins the registered
    # coordinator threads.
    with self.test_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      coord = coordinator.Coordinator()
      threads = [
          threading.Thread(
              target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
      ]
      for t in threads:
        coord.register_thread(t)
        t.start()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      for t in threads:
        self.assertTrue(t.is_alive())
      self.assertEqual(0, coord_sess.run(c))
      for t in threads:
        self.assertTrue(t.is_alive())
      self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
      for t in threads:
        self.assertTrue(t.is_alive())
      with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
        coord_sess.run([None], feed_dict={c: 2})
      coord_sess.close()
      for t in threads:
        self.assertFalse(t.is_alive())
      self.assertTrue(coord.should_stop())
      self.assertTrue(coord_sess.should_stop())

  def test_stop_threads_on_close(self):
    # close() alone stops the coordinator and joins its threads.
    with self.test_session() as sess:
      coord = coordinator.Coordinator()
      threads = [
          threading.Thread(
              target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
      ]
      for t in threads:
        coord.register_thread(t)
        t.start()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      coord_sess.close()
      for t in threads:
        self.assertFalse(t.is_alive())
      self.assertTrue(coord.should_stop())
      self.assertTrue(coord_sess.should_stop())
class AbortAtNSession(object):
  """A mock session that aborts at the N-th run call."""

  def __init__(self, sess, n):
    self._sess = sess
    self._count = n   # remaining successful run() calls before aborting

  def close(self):
    # nothing to release for the mock
    pass

  def run(self, *args, **kwargs):
    # Delegates to the real session until the budget is exhausted, then
    # raises AbortedError on every subsequent call.
    if self._count == 0:
      raise errors_impl.AbortedError('Aborted at N', None, None)
    self._count -= 1
    return self._sess.run(*args, **kwargs)
class StopCoordinatorWithException(session_run_hook.SessionRunHook):
  """With this hook Coordinator throws an exception after N-runs.

  A side thread busy-waits until the countdown (decremented once per
  `after_run`) reaches zero, then reports the configured exception to the
  coordinator via `request_stop`.
  """

  def __init__(self, calls_before_stopping, exception_to_raise=None):
    # Guard so the side thread is launched only once, even when
    # after_create_session() is invoked again for a re-created session.
    self._started_the_side_thread_already = False
    # Protects _calls_before_stopping, which is touched by both the main
    # thread (after_run) and the side thread.
    self._lock = threading.Lock()
    # Set once the side thread has actually delivered the exception.
    self._stored_exception_event = threading.Event()
    self._calls_before_stopping = calls_before_stopping
    # Defaults to AbortedError; the tests in this file show that
    # MonitoredSession treats AbortedError as recoverable.
    self._exception_to_raise = (exception_to_raise or errors_impl.AbortedError(
        None, None, 'Aborted at N'))

  def _maybe_stop_with_exception(self, coord):
    # Side-thread body: spin until the countdown hits zero, then raise the
    # stored exception so it carries a traceback, hand it to the
    # coordinator, and signal completion before exiting.
    while True:
      with self._lock:
        if self._calls_before_stopping == 0:
          try:
            raise self._exception_to_raise
          except Exception as e:  # pylint: disable=broad-except
            coord.request_stop(e)
          self._stored_exception_event.set()
          break

  def after_create_session(self, session, coord):
    if self._started_the_side_thread_already:
      return

    separate_thread = threading.Thread(
        target=self._maybe_stop_with_exception, args=(coord,))

    coord.register_thread(separate_thread)
    separate_thread.start()
    self._started_the_side_thread_already = True
    # Coordinator will take care of joining `separate_thread`.

  def after_run(self, run_context, run_values):
    # Decrement the countdown under the lock; when it reaches zero, block
    # until the side thread has stored the exception, so the next step is
    # guaranteed to observe the stopped coordinator.
    stopping_now = False

    with self._lock:
      self._calls_before_stopping -= 1
      if self._calls_before_stopping == 0:
        stopping_now = True

    if stopping_now:
      self._stored_exception_event.wait()
class FailTrainingAfterCoordinatorStopped(StopCoordinatorWithException):
  """With this hook training encounters an exception after N-runs."""

  def __init__(self, calls_before_stopping):
    super(FailTrainingAfterCoordinatorStopped, self).__init__(
        calls_before_stopping)
    # Remembered so that after_run() can surface whatever the side thread
    # reported to the coordinator.
    self._coord = None

  def after_create_session(self, session, coord):
    self._coord = coord
    return super(FailTrainingAfterCoordinatorStopped,
                 self).after_create_session(session, coord)

  def after_run(self, run_context, run_values):
    super(FailTrainingAfterCoordinatorStopped, self).after_run(
        run_context, run_values)
    try:
      # The side thread may have stored an exception inside the coordinator
      # during the run that just finished; surface it here.
      self._coord.raise_requested_exception()
    except errors_impl.AbortedError:
      # In real training the main thread may never see the coordinator's
      # exception directly: once the coordinator stopped, the main thread can
      # get stuck (e.g. the coordinator was supposed to run `FIFOQueue.enqueue`
      # while the main thread blocks on a `FIFOQueue.dequeue`), and some time
      # later the session is garbage collected, which surfaces as:
      raise errors_impl.CancelledError(None, None,
                                       'Session got garbage-collected.')
class CountingSessionCreator(object):
  """A creator that counts the number of created sessions."""

  def __init__(self, session):
    self._session = session
    # Each test case owns exactly one real session that cannot be re-created,
    # so neutralize close() and keep handing out the same object.
    self._session.close = lambda *args: None
    self._creation_count = 0

  @property
  def number_of_sessions_created(self):
    """How many times create_session() has been called so far."""
    return self._creation_count

  def create_session(self):
    self._creation_count += 1
    return self._session
class RecoverableSessionTest(test.TestCase):
  """_RecoverableSession tests."""

  class _SessionReturner(object):
    """Session creator that always returns the same pre-built session."""

    def __init__(self, sess):
      self._sess = sess

    def create_session(self):
      return self._sess

  def test_properties(self):
    """graph and sess_str are forwarded from the underlying session."""
    with self.test_session() as sess:
      constant_op.constant(0.0)
      recoverable_sess = monitored_session._RecoverableSession(
          self._SessionReturner(sess))
      # assertEquals is a deprecated unittest alias; use assertEqual, which
      # the rest of this file uses.
      self.assertEqual(sess.graph, recoverable_sess.graph)
      self.assertEqual(sess.sess_str, recoverable_sess.sess_str)

  def test_run(self):
    """run() delegates to the wrapped session."""
    with self.test_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      recoverable_sess = monitored_session._RecoverableSession(
          self._SessionReturner(sess))
      self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))

  def test_recovery(self):
    """An AbortedError in run() silently switches to a freshly created session."""
    with self.test_session() as sess:

      class StackSessionCreator(object):
        """Pops pre-built aborting sessions off a stack, one per creation."""

        def __init__(self, sess):
          self.sessions_to_use = [
              AbortAtNSession(sess, x + 1) for x in range(3)
          ]

        def create_session(self):
          return self.sessions_to_use.pop(0)

      c = constant_op.constant(0)
      v = array_ops.identity(c)
      session_creator = StackSessionCreator(sess)
      # List of 3 sessions to use for recovery. The first one aborts
      # after 1 run() call, the second after 2 run calls, the third
      # after 3 run calls.
      self.assertEqual(3, len(session_creator.sessions_to_use))
      # Make the recoverable session use these 3 sessions in sequence by
      # passing a factory that pops from the sessions_to_use list.
      recoverable_sess = monitored_session._RecoverableSession(session_creator)
      self.assertEqual(
          2, len(session_creator.sessions_to_use))  # One session popped.
      # Using first session.
      self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))
      self.assertEqual(
          2, len(session_creator.sessions_to_use))  # Still 2 sessions available
      # This will fail and recover by picking up the second session.
      self.assertEqual(42, recoverable_sess.run(v, feed_dict={c: 42}))
      self.assertEqual(
          1, len(session_creator.sessions_to_use))  # Still 1 session available
      self.assertEqual(33, recoverable_sess.run(v, feed_dict={c: 33}))
      self.assertEqual(
          1, len(session_creator.sessions_to_use))  # Still 1 session available
      # This will fail and recover by picking up the last session.
      self.assertEqual(24, recoverable_sess.run(v, feed_dict={c: 24}))
      self.assertEqual(
          0, len(session_creator.sessions_to_use))  # All sessions used.
      self.assertEqual(11, recoverable_sess.run(v, feed_dict={c: 11}))
      self.assertEqual(0, recoverable_sess.run(v, feed_dict={c: 0}))
      # This will fail and throw a real error as the pop() will fail.
      with self.assertRaisesRegexp(IndexError, 'pop from empty list'):
        recoverable_sess.run(v, feed_dict={c: -12})

  def test_recovery_from_coordinator_exception(self):
    """An AbortedError reported to the coordinator re-creates the session."""
    with self.test_session() as test_session:
      session_creator = CountingSessionCreator(test_session)
      session = monitored_session.MonitoredSession(
          session_creator,
          [StopCoordinatorWithException(calls_before_stopping=2)])

      self.assertEqual(1, session_creator.number_of_sessions_created)
      self.assertFalse(session.should_stop())

      c = constant_op.constant(0)
      v = array_ops.identity(c)

      # The coordinator will not abort during this call, since it's the call
      # number 0.
      self.assertEqual(51, session.run(v, feed_dict={c: 51}))
      self.assertFalse(session.should_stop())
      # The coordinator will abort during the next call, since it's the call
      # number 1.
      self.assertEqual(42, session.run(v, feed_dict={c: 42}))
      # Even though the coordinator was asked to stop, the underlying session
      # is recreated and is to be continued.
      self.assertFalse(session.should_stop())
      self.assertEqual(2, session_creator.number_of_sessions_created)

  def test_recovery_from_non_preemption_in_coordinator(self):
    """A non-recoverable coordinator error stops training for good."""
    with self.test_session() as test_session:
      session_creator = CountingSessionCreator(test_session)
      hook = StopCoordinatorWithException(
          calls_before_stopping=2,
          exception_to_raise=errors_impl.UnknownError(
              None, None, 'Some fatal exception inside the coordinator.'))
      session = monitored_session.MonitoredSession(session_creator, [hook])

      self.assertEqual(1, session_creator.number_of_sessions_created)
      self.assertFalse(session.should_stop())

      c = constant_op.constant(0)
      v = array_ops.identity(c)

      # The coordinator will not abort during this call, since it's the call
      # number 0.
      self.assertEqual(51, session.run(v, feed_dict={c: 51}))
      self.assertFalse(session.should_stop())
      # The coordinator will abort during the next call, since it's the call
      # number 1.
      self.assertEqual(42, session.run(v, feed_dict={c: 42}))
      # The coordinator was asked to stop due to non-redeemable error. Training
      # should stop and the session should not be recreated.
      self.assertTrue(session.should_stop())
      self.assertEqual(1, session_creator.number_of_sessions_created)
      with self.assertRaises(errors_impl.UnknownError):
        session.close()

  def test_recovery_from_session_getting_stuck(self):
    """A CancelledError raised by training itself re-creates the session."""
    with self.test_session() as test_session:
      session_creator = CountingSessionCreator(test_session)
      session = monitored_session.MonitoredSession(
          session_creator,
          [FailTrainingAfterCoordinatorStopped(calls_before_stopping=2)])

      self.assertEqual(1, session_creator.number_of_sessions_created)
      self.assertFalse(session.should_stop())

      c = constant_op.constant(0)
      v = array_ops.identity(c)

      # Training will not fail, since it's the call number 0.
      self.assertEqual(51, session.run(v, feed_dict={c: 51}))
      self.assertFalse(session.should_stop())
      # Training will fail during the next call, since it's the call
      # number 1.
      self.assertEqual(42, session.run(v, feed_dict={c: 42}))
      # Even though the coordinator stopped and training failed, the
      # underlying session is recreated and training is to be continued.
      self.assertFalse(session.should_stop())
      self.assertEqual(2, session_creator.number_of_sessions_created)
class FakeSession(monitored_session._WrappedSession):
  """Wrapped session that records the kwargs of its most recent run() call."""

  def __init__(self, sess):
    super(FakeSession, self).__init__(sess)
    # Keyword arguments seen by the last run(); inspected by the tests.
    self.args_called = {}

  def run(self, fetches, **kwargs):
    # Remember the keyword arguments, then forward only the fetches so the
    # tests can verify the recorded kwargs directly.
    self.args_called = kwargs.copy()
    return super(FakeSession, self).run(fetches)
class HookedSessionTest(test.TestCase):
  """Tests of _HookedSession."""

  def testRunPassesAllArguments(self):
    # feed_dict/options/run_metadata given to run() must reach the wrapped
    # session unchanged; FakeSession records the kwargs it receives.
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_run = FakeSession(sess)
      mon_sess = monitored_session._HookedSession(sess=mock_run, hooks=[])
      a_tensor = constant_op.constant([0], name='a_tensor')
      sess.run(variables.global_variables_initializer())
      output = mon_sess.run(fetches=a_tensor,
                            feed_dict='a_feed',
                            options='an_option',
                            run_metadata='a_metadata')
      self.assertEqual(output, [0])
      self.assertEqual(mock_run.args_called, {
          'feed_dict': 'a_feed',
          'options': 'an_option',
          'run_metadata': 'a_metadata'
      })

  def testCallsHooksBeginEnd(self):
    # Every hook sees before_run/after_run exactly once per run() call, with
    # the original args and session accessible from the run context.
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      sess.run(variables.global_variables_initializer())
      mon_sess.run(a_tensor)

      for hook in [mock_hook, mock_hook2]:
        self.assertEqual(
            hook.last_run_values,
            session_run_hook.SessionRunValues(
                results=None,
                options=config_pb2.RunOptions(),
                run_metadata=config_pb2.RunMetadata()))
        self.assertEqual(hook.last_run_context.original_args,
                         session_run_hook.SessionRunArgs(a_tensor))
        self.assertEqual(hook.last_run_context.session, sess)
        # begin/after_create_session are not _HookedSession's responsibility,
        # so they must not have been called here.
        self.assertEqual(hook.call_counter['begin'], 0)
        self.assertEqual(hook.call_counter['after_create_session'], 0)
        self.assertEqual(hook.call_counter['before_run'], 1)
        self.assertEqual(hook.call_counter['after_run'], 1)

  def testShouldStop(self):
    # should_stop() becomes True once any hook requests a stop.
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      constant_op.constant([0], name='a_tensor')
      sess.run(variables.global_variables_initializer())

      mon_sess.run(fetches='a_tensor')
      self.assertFalse(mon_sess.should_stop())

      mock_hook.should_stop = True
      mon_sess.run(fetches='a_tensor')
      self.assertTrue(mon_sess.should_stop())

  def testFetchesHookRequests(self):
    # Extra fetches requested by hooks run in the same call and are delivered
    # back to each hook through run_values.results.
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      another_tensor = constant_op.constant([5], name='another_tensor')
      third_tensor = constant_op.constant([10], name='third_tensor')
      mock_hook.request = session_run_hook.SessionRunArgs([another_tensor])
      mock_hook2.request = session_run_hook.SessionRunArgs([third_tensor])
      sess.run(variables.global_variables_initializer())

      output = mon_sess.run(fetches=a_tensor)
      self.assertEqual(output, [0])
      self.assertEqual(mock_hook.last_run_values.results, [5])
      self.assertEqual(mock_hook2.last_run_values.results, [10])

  def testOnlyHooksHaveFeeds(self):
    # Feeds supplied exclusively by hooks are merged into the run.
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      b_tensor = constant_op.constant([0], name='b_tensor')
      add_tensor = a_tensor + b_tensor
      mock_hook.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [5]})
      mock_hook2.request = session_run_hook.SessionRunArgs(
          None, feed_dict={b_tensor: [10]})
      sess.run(variables.global_variables_initializer())

      self.assertEqual(mon_sess.run(fetches=add_tensor), [15])

  def testBothHooksAndUserHaveFeeds(self):
    # Hook feeds and the caller's feed_dict are merged; the caller's dict is
    # not mutated in the process.
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      b_tensor = constant_op.constant([0], name='b_tensor')
      c_tensor = constant_op.constant([0], name='c_tensor')
      add_tensor = a_tensor + b_tensor + c_tensor
      mock_hook.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [5]})
      mock_hook2.request = session_run_hook.SessionRunArgs(
          None, feed_dict={b_tensor: [10]})
      sess.run(variables.global_variables_initializer())

      feed_dict = {c_tensor: [20]}
      self.assertEqual(
          mon_sess.run(fetches=add_tensor, feed_dict=feed_dict), [35])
      # User feed_dict should not be changed
      self.assertEqual(len(feed_dict), 1)

  def testHooksFeedConflicts(self):
    # Two hooks feeding the same tensor is an error.
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      b_tensor = constant_op.constant([0], name='b_tensor')
      add_tensor = a_tensor + b_tensor
      mock_hook.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [5]})
      mock_hook2.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [10]})
      sess.run(variables.global_variables_initializer())

      with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
        mon_sess.run(fetches=add_tensor)

  def testHooksAndUserFeedConflicts(self):
    # A hook feeding a tensor the caller also feeds is an error.
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      b_tensor = constant_op.constant([0], name='b_tensor')
      add_tensor = a_tensor + b_tensor
      mock_hook.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [5]})
      mock_hook2.request = session_run_hook.SessionRunArgs(
          None, feed_dict={b_tensor: [10]})
      sess.run(variables.global_variables_initializer())

      with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
        mon_sess.run(fetches=add_tensor, feed_dict={b_tensor: [10]})
class RaiseOnceAtCountN(session_run_hook.SessionRunHook):
  """Hook that raises an Exception at step N."""

  def __init__(self, n, ex):
    self.n = n            # Countdown of before_run() calls until the raise.
    self.ex = ex          # Exception instance (or class) to raise.
    self.raised = False   # True once the exception has been raised.

  def before_run(self, run_context):
    """Decrements the countdown and raises the stored exception once."""
    self.n -= 1
    if self.n == 0 and not self.raised:
      self.raised = True
      raise self.ex
    return None
class RunOptionsMetadataHook(session_run_hook.SessionRunHook):
  """A hook that observes & optionally modifies RunOptions and RunMetadata."""

  def __init__(self, trace_level, timeout_in_ms, output_partition_graphs,
               debug_tensor_watch):
    # Options/metadata observed after every completed run, in call order;
    # inspected directly by the tests.
    self.run_options_list = []
    self.run_metadata_list = []
    # Settings injected into each run through before_run().
    self._trace_level = trace_level
    self._timeout_in_ms = timeout_in_ms
    self._output_partition_graphs = output_partition_graphs
    self._debug_tensor_watch = debug_tensor_watch

  def before_run(self, run_context):
    """Requests the run be executed with this hook's options and debug watch."""
    run_options = config_pb2.RunOptions(
        trace_level=self._trace_level,
        timeout_in_ms=self._timeout_in_ms,
        output_partition_graphs=self._output_partition_graphs)
    run_options.debug_options.debug_tensor_watch_opts.extend(
        [self._debug_tensor_watch])
    return session_run_hook.SessionRunArgs(None, None, options=run_options)

  def after_run(self, run_context, run_values):
    """Records the (possibly merged) options and metadata of the finished run."""
    self.run_options_list.append(run_values.options)
    self.run_metadata_list.append(run_values.run_metadata)
class MonitoredSessionTest(test.TestCase):
  """MonitoredSession tests."""

  def test_defaults(self):
    # A default MonitoredSession runs variable initialization itself, so the
    # variable is readable immediately.
    with ops.Graph().as_default():
      a_var = variables.Variable(0)
      with monitored_session.MonitoredSession() as session:
        self.assertEqual(0, session.run(a_var))

  def test_last_step(self):
    # StopAtStepHook(last_step=N) flips should_stop() exactly when the global
    # step reaches N, including after a checkpoint restore.
    logdir = _test_dir(self.get_temp_dir(), 'test_last_step')
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      # Run till step 3 and save.
      hooks = [basic_session_run_hooks.StopAtStepHook(last_step=3)]
      scaffold = monitored_session.Scaffold().finalize()
      with monitored_session.MonitoredSession(hooks=hooks) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertFalse(session.should_stop())
        self.assertEqual(1, session.run(do_step))
        self.assertFalse(session.should_stop())
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
        self.assertEqual(3, session.run(do_step))
        self.assertTrue(session.should_stop())
        save_path = scaffold.saver.save(session._coordinated_creator.tf_sess,
                                        os.path.join(logdir, 'step-3'))
      # Restore from the checkpoint and run till step 5.
      def load_ckpt(scaffold, sess):
        scaffold.saver.restore(sess, save_path)

      session_creator = monitored_session.ChiefSessionCreator(
          monitored_session.Scaffold(init_fn=load_ckpt))
      hooks = [basic_session_run_hooks.StopAtStepHook(last_step=5)]
      with monitored_session.MonitoredSession(
          hooks=hooks, session_creator=session_creator) as session:
        self.assertEqual(3, session.run(gstep))
        self.assertFalse(session.should_stop())
        self.assertEqual(4, session.run(do_step))
        self.assertFalse(session.should_stop())
        self.assertEqual(5, session.run(do_step))
        self.assertTrue(session.should_stop())

  def test_num_steps(self):
    # StopAtStepHook(num_steps=N) stops after N steps counted from where the
    # session started, regardless of the restored global step value.
    logdir = _test_dir(self.get_temp_dir(), 'test_num_steps')
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      # Do 3 steps and save.
      hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=3)]
      scaffold = monitored_session.Scaffold().finalize()
      with monitored_session.MonitoredSession(hooks=hooks) as session:
        session.run(do_step)
        self.assertFalse(session.should_stop())
        session.run(do_step)
        self.assertFalse(session.should_stop())
        session.run(do_step)
        self.assertTrue(session.should_stop())
        save_path = scaffold.saver.save(session._coordinated_creator.tf_sess,
                                        os.path.join(logdir, 'step-3'))
      # Restore and do 4 steps.
      def load_ckpt(scaffold, sess):
        scaffold.saver.restore(sess, save_path)

      session_creator = monitored_session.ChiefSessionCreator(
          scaffold=monitored_session.Scaffold(init_fn=load_ckpt))
      hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=4)]
      with monitored_session.MonitoredSession(
          hooks=hooks, session_creator=session_creator) as session:
        self.assertEqual(4, session.run(do_step))
        self.assertFalse(session.should_stop())
        session.run(do_step)
        self.assertFalse(session.should_stop())
        session.run(do_step)
        self.assertFalse(session.should_stop())
        session.run(do_step)
        self.assertTrue(session.should_stop())

  # This set of tests, verifies the supervised session behavior when exceptions
  # are raised next to the innermost session run() call.

  def test_recovery(self):
    # A restarted session finds the checkpoint and recovers the global step.
    logdir = _test_dir(self.get_temp_dir(), 'test_recovery')
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      scaffold = monitored_session.Scaffold()
      # Use a hook to save the model after every step. It also saves it at
      # the end.
      hooks = [
          basic_session_run_hooks.CheckpointSaverHook(
              logdir, save_steps=1, scaffold=scaffold)
      ]
      with monitored_session.MonitoredSession(
          session_creator=monitored_session.ChiefSessionCreator(
              scaffold, checkpoint_dir=logdir),
          hooks=hooks) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
      # A restart will find the checkpoint and recover automatically.
      with monitored_session.MonitoredSession(
          session_creator=monitored_session.ChiefSessionCreator(
              scaffold, checkpoint_dir=logdir)) as session:
        self.assertEqual(2, session.run(gstep))
      # Recovery also works from an explicit checkpoint path.
      with monitored_session.MonitoredSession(
          session_creator=monitored_session.ChiefSessionCreator(
              scaffold,
              checkpoint_filename_with_path=saver_lib.latest_checkpoint(
                  logdir))) as session:
        self.assertEqual(2, session.run(gstep))

  def test_retry_initialization_on_aborted_error(self):
    # Tests that we silently retry on abort during initialization.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      self.init_raised_aborted_error = False

      def _init_fn(scaffold, session):
        _, _ = scaffold, session
        # Abort exactly once; the retried initialization then succeeds.
        if not self.init_raised_aborted_error:
          self.init_raised_aborted_error = True
          raise errors_impl.AbortedError(None, None, 'Abort')

      with monitored_session.MonitoredSession(
          session_creator=monitored_session.ChiefSessionCreator(
              scaffold=monitored_session.Scaffold(
                  init_fn=_init_fn))) as session:
        self.assertFalse(session.should_stop())
        self.assertEqual(0, session.run(gstep))
      self.assertTrue(self.init_raised_aborted_error)

  def _retry_test(self, ex):
    # Tests that we silently retry on error. Note that this does not test
    # recovery as we do not use a CheckpointSaver in this test.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      hook = RaiseOnceAtCountN(4, ex)
      with monitored_session.MonitoredSession(hooks=[hook]) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
        # Here at step 3, the hook triggers and raises AbortedError. The
        # MonitoredSession automatically retries and restart from a freshly
        # initialized session, so the step is back to 0 and running do_step
        # moves it to 1.
        self.assertEqual(1, session.run(do_step))
        self.assertFalse(session.should_stop())
        self.assertTrue(hook.raised)
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())

  def test_retry_on_aborted_error(self):
    self._retry_test(errors_impl.AbortedError(None, None, 'Abort'))

  def test_retry_on_unavailable_error(self):
    self._retry_test(errors_impl.UnavailableError(None, None, 'Unavailable'))

  def test_recover_and_retry_on_aborted_error(self):
    # Tests that we silently retry and recover on abort. This test uses
    # a CheckpointSaver to have something to recover from.
    logdir = _test_dir(self.get_temp_dir(),
                       'test_recover_and_retry_on_aborted_error')
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      scaffold = monitored_session.Scaffold()
      abort_hook = RaiseOnceAtCountN(
          4, errors_impl.AbortedError(None, None, 'Abort'))
      # Save after each step.
      ckpt_hook = basic_session_run_hooks.CheckpointSaverHook(
          logdir, save_steps=1, scaffold=scaffold)
      hooks = [abort_hook, ckpt_hook]
      with monitored_session.MonitoredSession(
          session_creator=monitored_session.ChiefSessionCreator(
              scaffold, checkpoint_dir=logdir),
          hooks=hooks) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
        # Here at step 3, the hook triggers and raises AbortedError. The
        # MonitoredSession automatically restores and retries.
        self.assertEqual(3, session.run(do_step))
        self.assertTrue(abort_hook.raised)
        self.assertFalse(session.should_stop())
        self.assertEqual(4, session.run(do_step))
        self.assertFalse(session.should_stop())

  def test_exit_cleanly_on_out_of_range_exception(self):
    # Tests that we stop cleanly when OutOfRange is raised.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,
                                                              'EOI'))
      session = monitored_session.MonitoredSession(hooks=[hook])
      # session should cleanly exit from the context.
      with session:
        self.assertEqual(0, session.run(gstep))
        self.assertFalse(session.should_stop())
        # Here at step 1, the hook triggers and raises OutOfRange. The
        # session should go into should_stop() mode. It should raise the
        # exception. So next step should not be executed.
        session.run(do_step)
        # Never reached: the OutOfRange above exits the with-block cleanly.
        self.assertTrue(False)
      self.assertTrue(session.should_stop())

  def test_exit_cleanly_on_stop_iteration_exception(self):
    # Tests that we stop cleanly when StopIteration is raised.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      hook = RaiseOnceAtCountN(2, StopIteration)
      session = monitored_session.MonitoredSession(hooks=[hook])
      # session should cleanly exit from the context.
      with session:
        self.assertEqual(0, session.run(gstep))
        self.assertFalse(session.should_stop())
        # Here at step 1, the hook triggers and raises StopIteration. The
        # session should go into should_stop() mode. It should raise the
        # exception. So next step should not be executed.
        session.run(do_step)
        # Never reached: the StopIteration above exits the with-block cleanly.
        self.assertTrue(False)
      self.assertTrue(session.should_stop())

  def test_regular_exception_pass_through_run(self):
    # Tests that regular exceptions just pass through a "with
    # MonitoredSession" block and set the session in stop mode.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      hook = RaiseOnceAtCountN(4, RuntimeError('regular exception'))
      session = monitored_session.MonitoredSession(hooks=[hook])
      with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
        with session:
          self.assertEqual(0, session.run(gstep))
          self.assertEqual(1, session.run(do_step))
          self.assertEqual(2, session.run(do_step))
          self.assertFalse(session.should_stop())
          # This triggers the hook and raises the exception
          session.run(do_step)
          # We should not hit this
          self.assertFalse(True)
      self.assertTrue(hook.raised)
      self.assertTrue(session.should_stop())

  def test_regular_exception_reported_to_coord_pass_through_run(self):
    # Tests that regular exceptions reported to the coordinator from a thread
    # passes through a "run()" call within a "with MonitoredSession" block and
    # set the session in stop mode.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      session = monitored_session.MonitoredSession()
      run_performed_without_error = False
      with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
        with session:
          self.assertEqual(0, session.run(gstep))
          # Report an exception through the coordinator.
          try:
            raise RuntimeError('a thread wants to stop')
          except RuntimeError as e:
            session._coordinated_creator.coord.request_stop(e)
          # Call run() which should perform normally.
          self.assertEqual(0, session.run(gstep))
          run_performed_without_error = True
      self.assertTrue(run_performed_without_error)

  def test_regular_exception_reported_to_coord_pass_through_return(self):
    # Tests that regular exceptions reported to the coordinator from a thread
    # passes through returning from a "with MonitoredSession" block and
    # set the session in stop mode.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      session = monitored_session.MonitoredSession()
      with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
        with session:
          self.assertEqual(0, session.run(gstep))
          # Report an exception through the coordinator.
          try:
            raise RuntimeError('a thread wants to stop')
          except RuntimeError as e:
            session._coordinated_creator.coord.request_stop(e)
          self.assertTrue(session.should_stop())

  # This set of tests, verifies the session behavior when exceptions are raised
  # from code inside a "with MonitoredSession:" context.

  def test_stop_cleanly_when_no_exception_in_with_body(self):
    # Tests that the session is closed and stopped after a clean with-body.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      session = monitored_session.MonitoredSession()
      with session:
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
      # Should have closed.
      self.assertTrue(session.should_stop())
      self.assertTrue(session._is_closed())

  def test_raises_regular_exceptions_in_with_body(self):
    # Tests that regular exceptions in "with body" are seen outside.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      session = monitored_session.MonitoredSession()
      # We should see that exception.
      with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
        with session:
          self.assertEqual(1, session.run(do_step))
          self.assertEqual(2, session.run(do_step))
          self.assertFalse(session.should_stop())
          # Will be visible outside the "with body".
          raise RuntimeError('regular exception')
      # Should have closed.
      self.assertTrue(session.should_stop())
      self.assertTrue(session._is_closed())

  def test_graph(self):
    # The session exposes the graph it was created under.
    with ops.Graph().as_default() as g:
      with monitored_session.MonitoredSession() as session:
        self.assertEqual(g, session.graph)

  def test_graph_finalized_during_run_unfinalized_after_exit(self):
    # MonitoredSession finalizes the graph while active and unfinalizes a
    # graph it finalized itself once the with-block exits.
    with ops.Graph().as_default() as g:
      a_var = variables.Variable(0)
      with monitored_session.MonitoredSession() as session:
        self.assertEqual(0, session.run(a_var))
        self.assertTrue(g.finalized)
      self.assertFalse(g.finalized)

  def test_keep_finalized_graph_as_finalized(self):
    # A graph finalized before the session (by Scaffold.finalize()) stays
    # finalized after the session exits.
    with ops.Graph().as_default() as g:
      a_var = variables.Variable(0)
      monitored_session.Scaffold().finalize()
      with monitored_session.MonitoredSession() as session:
        self.assertEqual(0, session.run(a_var))
        self.assertTrue(g.finalized)
      self.assertTrue(g.finalized)

  def test_merge_run_options_from_hooks(self):
    """Test for rewriting RunOptions and observing RunMetadata with hooks."""
    with ops.Graph().as_default():
      my_const = constant_op.constant(42, name='my_const')
      _ = constant_op.constant(24, name='my_const_2')

      watch_a = debug_pb2.DebugTensorWatch(
          node_name='my_const',
          output_slot=0,
          debug_ops=['DebugIdentity'],
          debug_urls=[])
      hook_a = RunOptionsMetadataHook(2, 30000, False, watch_a)
      watch_b = debug_pb2.DebugTensorWatch(
          node_name='my_const_2',
          output_slot=0,
          debug_ops=['DebugIdentity'],
          debug_urls=[])
      hook_b = RunOptionsMetadataHook(3, 60000, True, watch_b)
      with monitored_session.MonitoredSession(
          hooks=[hook_a, hook_b]) as session:
        self.assertEqual(42, session.run(my_const))

        # trace_level=3 should have overridden trace_level=2;
        # timeout_in_ms=60000 should have overridden 30000;
        # output_partition_graphs=True should have overridden False.
        # The two debug tensor watches should have been merged.
        self.assertEqual(
            [
                config_pb2.RunOptions(
                    trace_level=3,
                    timeout_in_ms=60000,
                    output_partition_graphs=True,
                    debug_options=debug_pb2.DebugOptions(
                        debug_tensor_watch_opts=[watch_a, watch_b]))
            ],
            hook_b.run_options_list)
        self.assertEqual(1, len(hook_b.run_metadata_list))
        self.assertTrue(
            isinstance(hook_b.run_metadata_list[0], config_pb2.RunMetadata))
        self.assertGreater(len(hook_b.run_metadata_list[0].partition_graphs), 0)

  def test_merge_caller_and_hook_run_options(self):
    """Test that RunOptions from caller and hooks can be merged properly."""
    with ops.Graph().as_default():
      my_const = constant_op.constant(42, name='my_const')
      _ = constant_op.constant(24, name='my_const_2')

      hook_watch = debug_pb2.DebugTensorWatch(
          node_name='my_const_2',
          output_slot=0,
          debug_ops=['DebugIdentity'],
          debug_urls=[])
      hook = RunOptionsMetadataHook(2, 60000, False, hook_watch)
      with monitored_session.MonitoredSession(hooks=[hook]) as session:
        caller_watch = debug_pb2.DebugTensorWatch(
            node_name='my_const',
            output_slot=0,
            debug_ops=['DebugIdentity'],
            debug_urls=[])
        caller_options = config_pb2.RunOptions(
            trace_level=3, timeout_in_ms=30000, output_partition_graphs=True)
        caller_options.debug_options.debug_tensor_watch_opts.extend(
            [caller_watch])
        self.assertEqual(42, session.run(my_const, options=caller_options))

        # trace_level=3 from the caller should override 2 from the hook.
        # timeout_in_ms=60000 from the hook should override from the caller.
        # output_partition_graph=True from the caller should override False
        # from the hook.
        # The two debug watches from the caller and the hook should be merged,
        # in that order.
        self.assertEqual(
            [
                config_pb2.RunOptions(
                    trace_level=3,
                    timeout_in_ms=60000,
                    output_partition_graphs=True,
                    debug_options=debug_pb2.DebugOptions(
                        debug_tensor_watch_opts=[caller_watch, hook_watch]))
            ],
            hook.run_options_list)
        self.assertEqual(1, len(hook.run_metadata_list))
        self.assertTrue(
            isinstance(hook.run_metadata_list[0], config_pb2.RunMetadata))
        self.assertGreater(len(hook.run_metadata_list[0].partition_graphs), 0)
class SingularMonitoredSessionTest(test.TestCase):
  """Tests SingularMonitoredSession."""
  def test_handles_initialization(self):
    # SingularMonitoredSession must run variable initializers on entry.
    with ops.Graph().as_default():
      a_var = variables.Variable(0)
      with monitored_session.SingularMonitoredSession() as session:
        # If it's not initialized, following statement raises an error.
        self.assertEqual(0, session.run(a_var))
  def test_do_not_handle_aborted_error(self):
    # Unlike the recoverable MonitoredSession, SingularMonitoredSession must
    # let AbortedError propagate to the caller, both from run() inside the
    # with-block and when the with-block itself fails.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      class _RaiseAbortedHook(session_run_hook.SessionRunHook):
        def before_run(self, run_context):
          raise errors_impl.AbortedError(None, None, 'Abort')
      with monitored_session.SingularMonitoredSession(
          hooks=[_RaiseAbortedHook()]) as session:
        with self.assertRaises(errors_impl.AbortedError):
          self.assertEqual(0, session.run(gstep))
      with self.assertRaises(errors_impl.AbortedError):
        with monitored_session.SingularMonitoredSession(
            hooks=[_RaiseAbortedHook()]) as session:
          self.assertEqual(0, session.run(gstep))
  def test_exit_cleanly_on_out_of_range_exception(self):
    # Tests that we stop cleanly when OutOfRange is raised.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,
                                                              'EOI'))
      session = monitored_session.SingularMonitoredSession(hooks=[hook])
      # session should cleanly exit from the context.
      with session:
        self.assertEqual(0, session.run(gstep))
        self.assertFalse(session.should_stop())
        # Here at step 1, the hook triggers and raises OutOfRange. The
        # session should go into should_stop() mode. It should raise the
        # exception. So next step should not be executed.
        session.run(do_step)
        self.assertTrue(False)
      self.assertTrue(session.should_stop())
  def test_regular_exception_reported_to_coord_pass_through_run(self):
    # Tests that regular exceptions reported to the coordinator from a thread
    # passes through a "run()" call within a "with MonitoredSession" block and
    # set the session in stop mode.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      session = monitored_session.SingularMonitoredSession()
      run_performed_without_error = False
      with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
        with session:
          self.assertEqual(0, session.run(gstep))
          # Report an exception through the coordinator.
          try:
            raise RuntimeError('a thread wants to stop')
          except RuntimeError as e:
            session._coordinated_creator.coord.request_stop(e)
          # Call run() which should perform normally.
          self.assertEqual(0, session.run(gstep))
          run_performed_without_error = True
      self.assertTrue(run_performed_without_error)
  def test_stop_cleanly_when_no_exception_in_with_body(self):
    # Tests that regular exceptions pass through
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      session = monitored_session.SingularMonitoredSession()
      with session:
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
      # Should have closed.
      self.assertTrue(session.should_stop())
      self.assertEqual(None, session.raw_session())
  def test_graph(self):
    # The session exposes the graph it was created under.
    with ops.Graph().as_default() as g:
      with monitored_session.SingularMonitoredSession() as session:
        self.assertEqual(g, session.graph)
  def test_raw_session(self):
    # raw_session() exposes the underlying tf.Session while open.
    with ops.Graph().as_default():
      with monitored_session.SingularMonitoredSession() as session:
        self.assertTrue(isinstance(session.raw_session(), session_lib.Session))
# Standard test entry point: run every test case in this module.
if __name__ == '__main__':
  test.main()
| |
# -*- coding: utf-8 -*-
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironicclient.common import base
from ironicclient.common import utils
from ironicclient import exc
# Node fields accepted by the create (POST /v1/nodes) call; any other
# keyword passed to NodeManager.create() raises exc.InvalidAttribute.
CREATION_ATTRIBUTES = ['chassis_uuid', 'driver', 'driver_info', 'extra',
                       'uuid', 'properties']
class Node(base.Resource):
    """A bare-metal node resource returned by the Ironic API."""

    def __repr__(self):
        node_info = self._info
        return "<Node %s>" % node_info
class NodeManager(base.Manager):
    """Manager for the Ironic ``/v1/nodes`` REST resource."""
    # Wrapper class instantiated for each node record returned by the API.
    resource_class = Node
    @staticmethod
    def _path(id=None):
        """Return the nodes collection URL, or a single node's URL if *id*."""
        return '/v1/nodes/%s' % id if id else '/v1/nodes'
    def list(self, associated=None, maintenance=None, marker=None, limit=None,
             detail=False, sort_key=None, sort_dir=None):
        """Retrieve a list of nodes.
        :param associated: Optional, boolean whether to return a list of
                           associated or unassociated nodes.
        :param maintenance: Optional, boolean value that indicates whether
                            to get nodes in maintenance mode ("True"), or not
                            in maintenance mode ("False").
        :param marker: Optional, the UUID of a node, eg the last
                       node from a previous result set. Return
                       the next result set.
        :param limit: The maximum number of results to return per
                      request, if:
            1) limit > 0, the maximum number of nodes to return.
            2) limit == 0, return the entire list of nodes.
            3) limit param is NOT specified (None), the number of items
               returned respect the maximum imposed by the Ironic API
               (see Ironic's api.max_limit option).
        :param detail: Optional, boolean whether to return detailed information
                       about nodes.
        :param sort_key: Optional, field used for sorting.
        :param sort_dir: Optional, direction of sorting, either 'asc' (the
                         default) or 'desc'.
        :returns: A list of nodes.
        """
        if limit is not None:
            limit = int(limit)
        filters = utils.common_filters(marker, limit, sort_key, sort_dir)
        if associated is not None:
            filters.append('associated=%s' % associated)
        if maintenance is not None:
            filters.append('maintenance=%s' % maintenance)
        # Build the request path: optional 'detail' segment plus query string.
        path = ''
        if detail:
            path += 'detail'
        if filters:
            path += '?' + '&'.join(filters)
        # With an explicit limit the client pages through results itself.
        if limit is None:
            return self._list(self._path(path), "nodes")
        else:
            return self._list_pagination(self._path(path), "nodes",
                                         limit=limit)
    def list_ports(self, node_id, marker=None, limit=None, sort_key=None,
                   sort_dir=None, detail=False):
        """List all the ports for a given node.
        :param node_id: The UUID of the node.
        :param marker: Optional, the UUID of a port, eg the last
                       port from a previous result set. Return
                       the next result set.
        :param limit: The maximum number of results to return per
                      request, if:
            1) limit > 0, the maximum number of ports to return.
            2) limit == 0, return the entire list of ports.
            3) limit param is NOT specified (None), the number of items
               returned respect the maximum imposed by the Ironic API
               (see Ironic's api.max_limit option).
        :param sort_key: Optional, field used for sorting.
        :param sort_dir: Optional, direction of sorting, either 'asc' (the
                         default) or 'desc'.
        :param detail: Optional, boolean whether to return detailed information
                       about ports.
        :returns: A list of ports.
        """
        if limit is not None:
            limit = int(limit)
        filters = utils.common_filters(marker, limit, sort_key, sort_dir)
        path = "%s/ports" % node_id
        if detail:
            path += '/detail'
        if filters:
            path += '?' + '&'.join(filters)
        if limit is None:
            return self._list(self._path(path), "ports")
        else:
            return self._list_pagination(self._path(path), "ports",
                                         limit=limit)
    def get(self, node_id):
        """Return the node identified by *node_id*, or None if not found."""
        try:
            return self._list(self._path(node_id))[0]
        except IndexError:
            return None
    def get_by_instance_uuid(self, instance_uuid):
        """Return the single node associated with *instance_uuid*.
        :raises: exc.NotFound if exactly one node is not returned.
        """
        path = "detail?instance_uuid=%s" % instance_uuid
        nodes = self._list(self._path(path), 'nodes')
        # get all the details of the node assuming that
        # filtering by instance_uuid returns a collection
        # of one node if successful.
        if len(nodes) == 1:
            return nodes[0]
        else:
            raise exc.NotFound()
    def create(self, **kwargs):
        """Create a node from keyword attributes.
        :raises: exc.InvalidAttribute for any keyword not listed in
                 CREATION_ATTRIBUTES.
        """
        new = {}
        for (key, value) in kwargs.items():
            if key in CREATION_ATTRIBUTES:
                new[key] = value
            else:
                raise exc.InvalidAttribute()
        return self._create(self._path(), new)
    def delete(self, node_id):
        """Delete the node identified by *node_id*."""
        return self._delete(self._path(node_id))
    def update(self, node_id, patch):
        """Apply a JSON *patch* (list of patch operations) to a node."""
        return self._update(self._path(node_id), patch)
    def vendor_passthru(self, **kwargs):
        """Invoke a driver vendor passthru method on a node.
        Requires the keyword arguments 'node_id', 'method' and 'args';
        missing keys raise KeyError.
        """
        node_id = kwargs['node_id']
        method = kwargs['method']
        args = kwargs['args']
        path = self._path(node_id) + "/vendor_passthru/%s" % method
        return self._update(path, args, method='POST')
    def set_power_state(self, node_id, state):
        """Change the power state of a node.
        'on'/'off' are translated to "power on"/"power off" and 'reboot'
        to "rebooting"; any other value is sent to the API unchanged.
        """
        path = "%s/states/power" % node_id
        if state in ['on', 'off']:
            state = "power %s" % state
        if state in ['reboot']:
            state = "rebooting"
        target = {'target': state}
        return self._update(self._path(path), target, method='PUT')
    def validate(self, node_uuid):
        """Validate the driver interfaces of a node."""
        path = "%s/validate" % node_uuid
        return self.get(path)
    def set_provision_state(self, node_uuid, state):
        """Request a provision state change for a node."""
        path = "%s/states/provision" % node_uuid
        target = {'target': state}
        return self._update(self._path(path), target, method='PUT')
    def states(self, node_uuid):
        """Return the current state record of a node."""
        path = "%s/states" % node_uuid
        return self.get(path)
    def get_console(self, node_uuid):
        """Return the console information of a node as a dict ({} if unset)."""
        path = "%s/states/console" % node_uuid
        info = self.get(path)
        if not info:
            return {}
        return info.to_dict()
    def set_console_mode(self, node_uuid, enabled):
        """Enable or disable the console of a node."""
        path = "%s/states/console" % node_uuid
        target = {'enabled': enabled}
        return self._update(self._path(path), target, method='PUT')
    def set_boot_device(self, node_uuid, boot_device, persistent=False):
        """Set the boot device of a node (persistent across reboots if asked)."""
        path = "%s/management/boot_device" % node_uuid
        target = {'boot_device': boot_device, 'persistent': persistent}
        return self._update(self._path(path), target, method='PUT')
    def get_boot_device(self, node_uuid):
        """Return the current boot device of a node as a dict."""
        path = "%s/management/boot_device" % node_uuid
        return self.get(path).to_dict()
    def get_supported_boot_devices(self, node_uuid):
        """Return the boot devices supported by a node as a dict."""
        path = "%s/management/boot_device/supported" % node_uuid
        return self.get(path).to_dict()
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from lxml import etree as ET
except ImportError:
from xml.etree import ElementTree as ET
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.common.types import InvalidCredsError
from libcloud.common.dimensiondata import DimensionDataAPIException, NetworkDomainServicePlan
from libcloud.compute.drivers.dimensiondata import DimensionDataNodeDriver as DimensionData
from libcloud.compute.base import Node, NodeAuthPassword, NodeLocation
from libcloud.test import MockHttp
from libcloud.test.compute import TestCaseMixin
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import DIMENSIONDATA_PARAMS
class DimensionDataTests(unittest.TestCase, TestCaseMixin):
    """Tests for the DimensionData compute driver.
    All HTTP traffic is served from canned XML fixtures through
    DimensionDataMockHttp; no network access is performed.
    """
    def setUp(self):
        # Route all driver HTTP calls to the mock class and reset its mode.
        DimensionData.connectionCls.conn_classes = (None, DimensionDataMockHttp)
        DimensionDataMockHttp.type = None
        self.driver = DimensionData(*DIMENSIONDATA_PARAMS)
    def test_invalid_region(self):
        # An unknown region must raise.  The previous try/except-pass form
        # passed silently even when no ValueError was raised.
        with self.assertRaises(ValueError):
            self.driver = DimensionData(*DIMENSIONDATA_PARAMS, region='blah')
    def test_invalid_creds(self):
        DimensionDataMockHttp.type = 'UNAUTHORIZED'
        with self.assertRaises(InvalidCredsError):
            self.driver.list_nodes()
    def test_list_locations_response(self):
        DimensionDataMockHttp.type = None
        ret = self.driver.list_locations()
        self.assertEqual(len(ret), 5)
        first_node = ret[0]
        self.assertEqual(first_node.id, 'NA3')
        self.assertEqual(first_node.name, 'US - West')
        self.assertEqual(first_node.country, 'US')
    def test_list_nodes_response(self):
        DimensionDataMockHttp.type = None
        ret = self.driver.list_nodes()
        self.assertEqual(len(ret), 3)
    def test_list_sizes_response(self):
        DimensionDataMockHttp.type = None
        ret = self.driver.list_sizes()
        self.assertEqual(len(ret), 1)
        size = ret[0]
        self.assertEqual(size.name, 'default')
    def test_reboot_node_response(self):
        node = Node(id='11', name=None, state=None,
                    public_ips=None, private_ips=None, driver=self.driver)
        ret = node.reboot()
        self.assertTrue(ret is True)
    def test_reboot_node_response_INPROGRESS(self):
        DimensionDataMockHttp.type = 'INPROGRESS'
        node = Node(id='11', name=None, state=None,
                    public_ips=None, private_ips=None, driver=self.driver)
        with self.assertRaises(DimensionDataAPIException):
            node.reboot()
    def test_destroy_node_response(self):
        node = Node(id='11', name=None, state=None,
                    public_ips=None, private_ips=None, driver=self.driver)
        ret = node.destroy()
        self.assertTrue(ret is True)
    def test_destroy_node_response_RESOURCE_BUSY(self):
        DimensionDataMockHttp.type = 'INPROGRESS'
        node = Node(id='11', name=None, state=None,
                    public_ips=None, private_ips=None, driver=self.driver)
        with self.assertRaises(DimensionDataAPIException):
            node.destroy()
    def test_create_node_response(self):
        rootPw = NodeAuthPassword('pass123')
        image = self.driver.list_images()[0]
        network = self.driver.ex_list_networks()[0]
        node = self.driver.create_node(name='test2', image=image, auth=rootPw,
                                       ex_description='test2 node',
                                       ex_network=network,
                                       ex_is_started=False)
        self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
        self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
    def test_create_node_response_network_domain(self):
        rootPw = NodeAuthPassword('pass123')
        location = self.driver.ex_get_location_by_id('NA9')
        image = self.driver.list_images(location=location)[0]
        network_domain = self.driver.ex_list_network_domains(location=location)[0]
        vlan = self.driver.ex_list_vlans(location=location)[0]
        node = self.driver.create_node(name='test2', image=image, auth=rootPw,
                                       ex_description='test2 node',
                                       ex_network_domain=network_domain,
                                       ex_vlan=vlan,
                                       ex_is_started=False)
        self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
        self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
    def test_create_node_no_network(self):
        # A missing ex_network must raise.  The previous try/except-pass form
        # passed silently even when no ValueError was raised.
        rootPw = NodeAuthPassword('pass123')
        image = self.driver.list_images()[0]
        with self.assertRaises(ValueError):
            self.driver.create_node(name='test2', image=image, auth=rootPw,
                                    ex_description='test2 node',
                                    ex_network=None,
                                    ex_isStarted=False)
    def test_ex_shutdown_graceful(self):
        node = Node(id='11', name=None, state=None,
                    public_ips=None, private_ips=None, driver=self.driver)
        ret = self.driver.ex_shutdown_graceful(node)
        self.assertTrue(ret is True)
    def test_ex_shutdown_graceful_INPROGRESS(self):
        DimensionDataMockHttp.type = 'INPROGRESS'
        node = Node(id='11', name=None, state=None,
                    public_ips=None, private_ips=None, driver=self.driver)
        with self.assertRaises(DimensionDataAPIException):
            self.driver.ex_shutdown_graceful(node)
    def test_ex_start_node(self):
        node = Node(id='11', name=None, state=None,
                    public_ips=None, private_ips=None, driver=self.driver)
        ret = self.driver.ex_start_node(node)
        self.assertTrue(ret is True)
    def test_ex_start_node_INPROGRESS(self):
        DimensionDataMockHttp.type = 'INPROGRESS'
        node = Node(id='11', name=None, state=None,
                    public_ips=None, private_ips=None, driver=self.driver)
        with self.assertRaises(DimensionDataAPIException):
            self.driver.ex_start_node(node)
    def test_ex_power_off(self):
        node = Node(id='11', name=None, state=None,
                    public_ips=None, private_ips=None, driver=self.driver)
        ret = self.driver.ex_power_off(node)
        self.assertTrue(ret is True)
    def test_ex_power_off_INPROGRESS(self):
        DimensionDataMockHttp.type = 'INPROGRESS'
        node = Node(id='11', name=None, state=None,
                    public_ips=None, private_ips=None, driver=self.driver)
        with self.assertRaises(DimensionDataAPIException):
            self.driver.ex_power_off(node)
    def test_ex_reset(self):
        node = Node(id='11', name=None, state=None,
                    public_ips=None, private_ips=None, driver=self.driver)
        ret = self.driver.ex_reset(node)
        self.assertTrue(ret is True)
    def test_ex_attach_node_to_vlan(self):
        node = self.driver.ex_get_node_by_id('e75ead52-692f-4314-8725-c8a4f4d13a87')
        vlan = self.driver.ex_get_vlan('0e56433f-d808-4669-821d-812769517ff8')
        ret = self.driver.ex_attach_node_to_vlan(node, vlan)
        self.assertTrue(ret)
    def test_ex_destroy_nic(self):
        node = self.driver.ex_destroy_nic('a202e51b-41c0-4cfc-add0-b1c62fc0ecf6')
        self.assertTrue(node)
    def test_list_networks(self):
        nets = self.driver.list_networks()
        self.assertEqual(nets[0].name, 'test-net1')
        self.assertTrue(isinstance(nets[0].location, NodeLocation))
    def test_ex_create_network_domain(self):
        location = self.driver.ex_get_location_by_id('NA9')
        plan = NetworkDomainServicePlan.ADVANCED
        net = self.driver.ex_create_network_domain(location=location,
                                                   name='test',
                                                   description='test',
                                                   service_plan=plan)
        self.assertEqual(net.name, 'test')
        # Was assertTrue(net.id, '<uuid>'), which always passes for a truthy
        # id (the second argument is just the failure message).
        self.assertEqual(net.id, 'f14a871f-9a25-470c-aef8-51e13202e1aa')
    def test_ex_get_network_domain(self):
        net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
        self.assertEqual(net.id, '8cdfd607-f429-4df6-9352-162cfc0891be')
        self.assertEqual(net.description, 'test2')
        self.assertEqual(net.name, 'test')
    def test_ex_update_network_domain(self):
        net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
        net.name = 'new name'
        net2 = self.driver.ex_update_network_domain(net)
        self.assertEqual(net2.name, 'new name')
    def test_ex_delete_network_domain(self):
        net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
        result = self.driver.ex_delete_network_domain(net)
        self.assertTrue(result)
    def test_ex_list_networks(self):
        nets = self.driver.ex_list_networks()
        self.assertEqual(nets[0].name, 'test-net1')
        self.assertTrue(isinstance(nets[0].location, NodeLocation))
    def test_ex_list_network_domains(self):
        nets = self.driver.ex_list_network_domains()
        self.assertEqual(nets[0].name, 'Aurora')
        self.assertTrue(isinstance(nets[0].location, NodeLocation))
    def test_ex_list_vlans(self):
        vlans = self.driver.ex_list_vlans()
        self.assertEqual(vlans[0].name, "Primary")
    def test_ex_create_vlan(self):
        # (stray trailing comma removed from the signature)
        net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
        vlan = self.driver.ex_create_vlan(network_domain=net,
                                          name='test',
                                          private_ipv4_base_address='10.3.4.0',
                                          private_ipv4_prefix_size='24')
        self.assertEqual(vlan.id, 'cee8df03-9117-44cc-baaa-631ffa099683')
    def test_ex_get_vlan(self):
        vlan = self.driver.ex_get_vlan('0e56433f-d808-4669-821d-812769517ff8')
        self.assertEqual(vlan.id, '0e56433f-d808-4669-821d-812769517ff8')
        self.assertEqual(vlan.description, 'test2')
        self.assertEqual(vlan.name, 'Production VLAN')
        self.assertEqual(vlan.private_ipv4_range_address, '10.0.3.0')
        self.assertEqual(vlan.private_ipv4_range_size, '24')
    def test_ex_update_vlan(self):
        vlan = self.driver.ex_get_vlan('0e56433f-d808-4669-821d-812769517ff8')
        vlan.name = 'new name'
        vlan2 = self.driver.ex_update_vlan(vlan)
        self.assertEqual(vlan2.name, 'new name')
    def test_ex_delete_vlan(self):
        vlan = self.driver.ex_get_vlan('0e56433f-d808-4669-821d-812769517ff8')
        result = self.driver.ex_delete_vlan(vlan)
        self.assertTrue(result)
    def test_ex_expand_vlan(self):
        vlan = self.driver.ex_get_vlan('0e56433f-d808-4669-821d-812769517ff8')
        vlan.private_ipv4_range_size = '23'
        vlan = self.driver.ex_expand_vlan(vlan)
        self.assertEqual(vlan.private_ipv4_range_size, '23')
    def test_ex_add_public_ip_block_to_network_domain(self):
        net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
        block = self.driver.ex_add_public_ip_block_to_network_domain(net)
        self.assertEqual(block.id, '9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8')
    def test_ex_list_public_ip_blocks(self):
        net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
        blocks = self.driver.ex_list_public_ip_blocks(net)
        self.assertEqual(blocks[0].base_ip, '168.128.4.18')
        self.assertEqual(blocks[0].size, '2')
        self.assertEqual(blocks[0].id, '9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8')
        self.assertEqual(blocks[0].location.id, 'NA9')
        self.assertEqual(blocks[0].network_domain.id, net.id)
    def test_ex_get_public_ip_block(self):
        net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
        block = self.driver.ex_get_public_ip_block('9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8')
        self.assertEqual(block.base_ip, '168.128.4.18')
        self.assertEqual(block.size, '2')
        self.assertEqual(block.id, '9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8')
        self.assertEqual(block.location.id, 'NA9')
        self.assertEqual(block.network_domain.id, net.id)
    def test_ex_delete_public_ip_block(self):
        block = self.driver.ex_get_public_ip_block('9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8')
        result = self.driver.ex_delete_public_ip_block(block)
        self.assertTrue(result)
    def test_ex_list_firewall_rules(self):
        net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
        rules = self.driver.ex_list_firewall_rules(net)
        self.assertEqual(rules[0].id, '756cba02-b0bc-48f4-aea5-9445870b6148')
        self.assertEqual(rules[0].network_domain.id, '8cdfd607-f429-4df6-9352-162cfc0891be')
        self.assertEqual(rules[0].name, 'CCDEFAULT.BlockOutboundMailIPv4')
        self.assertEqual(rules[0].action, 'DROP')
        self.assertEqual(rules[0].ip_version, 'IPV4')
        self.assertEqual(rules[0].protocol, 'TCP')
        self.assertEqual(rules[0].source.ip_address, 'ANY')
        self.assertTrue(rules[0].source.any_ip)
        self.assertTrue(rules[0].destination.any_ip)
    def test_ex_create_firewall_rule(self):
        net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
        rules = self.driver.ex_list_firewall_rules(net)
        rule = self.driver.ex_create_firewall_rule(net, rules[0], 'FIRST')
        self.assertEqual(rule.id, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
    def test_ex_get_firewall_rule(self):
        net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
        rule = self.driver.ex_get_firewall_rule(net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
        self.assertEqual(rule.id, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
    def test_ex_set_firewall_rule_state(self):
        net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
        rule = self.driver.ex_get_firewall_rule(net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
        result = self.driver.ex_set_firewall_rule_state(rule, False)
        self.assertTrue(result)
    def test_ex_delete_firewall_rule(self):
        net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
        rule = self.driver.ex_get_firewall_rule(net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
        result = self.driver.ex_delete_firewall_rule(rule)
        self.assertTrue(result)
    def test_ex_create_nat_rule(self):
        net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
        rule = self.driver.ex_create_nat_rule(net, '1.2.3.4', '4.3.2.1')
        self.assertEqual(rule.id, 'd31c2db0-be6b-4d50-8744-9a7a534b5fba')
    def test_ex_list_nat_rules(self):
        net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
        rules = self.driver.ex_list_nat_rules(net)
        self.assertEqual(rules[0].id, '2187a636-7ebb-49a1-a2ff-5d617f496dce')
        self.assertEqual(rules[0].internal_ip, '10.0.0.15')
        self.assertEqual(rules[0].external_ip, '165.180.12.18')
    def test_ex_get_nat_rule(self):
        net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
        rule = self.driver.ex_get_nat_rule(net, '2187a636-7ebb-49a1-a2ff-5d617f496dce')
        self.assertEqual(rule.id, '2187a636-7ebb-49a1-a2ff-5d617f496dce')
        self.assertEqual(rule.internal_ip, '10.0.0.16')
        self.assertEqual(rule.external_ip, '165.180.12.19')
    def test_ex_delete_nat_rule(self):
        net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
        rule = self.driver.ex_get_nat_rule(net, '2187a636-7ebb-49a1-a2ff-5d617f496dce')
        result = self.driver.ex_delete_nat_rule(rule)
        self.assertTrue(result)
class InvalidRequestError(Exception):
    """Raised by the mock HTTP layer when a request body's XML root tag
    is not the one the endpoint expects."""

    def __init__(self, tag):
        message = "Invalid Request - %s" % tag
        super(InvalidRequestError, self).__init__(message)
class DimensionDataMockHttp(MockHttp):
fixtures = ComputeFileFixtures('dimensiondata')
def _oec_0_9_myaccount_UNAUTHORIZED(self, method, url, body, headers):
return (httplib.UNAUTHORIZED, "", {}, httplib.responses[httplib.UNAUTHORIZED])
def _oec_0_9_myaccount(self, method, url, body, headers):
body = self.fixtures.load('oec_0_9_myaccount.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_myaccount_INPROGRESS(self, method, url, body, headers):
body = self.fixtures.load('oec_0_9_myaccount.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_base_image(self, method, url, body, headers):
body = self.fixtures.load('oec_0_9_base_image.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_base_imageWithDiskSpeed(self, method, url, body, headers):
body = self.fixtures.load('oec_0_9_base_imageWithDiskSpeed.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed(self, method, url, body, headers):
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy(self, method, url, body, headers):
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter(self, method, url, body, headers):
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11(self, method, url, body, headers):
body = None
action = url.split('?')[-1]
if action == 'restart':
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart.xml')
elif action == 'shutdown':
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown.xml')
elif action == 'delete':
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete.xml')
elif action == 'start':
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start.xml')
elif action == 'poweroff':
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_INPROGRESS(self, method, url, body, headers):
body = None
action = url.split('?')[-1]
if action == 'restart':
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart_INPROGRESS.xml')
elif action == 'shutdown':
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown_INPROGRESS.xml')
elif action == 'delete':
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete_INPROGRESS.xml')
elif action == 'start':
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start_INPROGRESS.xml')
elif action == 'poweroff':
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff_INPROGRESS.xml')
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server(self, method, url, body, headers):
body = self.fixtures.load(
'_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation(self, method, url, body, headers):
body = self.fixtures.load(
'oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deleteServer(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}deleteServer":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deleteServer.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deleteServer_INPROGRESS(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}deleteServer":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deleteServer_RESOURCEBUSY.xml')
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_rebootServer(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}rebootServer":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_rebootServer.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_rebootServer_INPROGRESS(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}rebootServer":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_rebootServer_RESOURCEBUSY.xml')
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_infrastructure_datacenter(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_infrastructure_datacenter.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_startServer(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}startServer":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_startServer.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_startServer_INPROGRESS(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}startServer":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_startServer_INPROGRESS.xml')
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_shutdownServer(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}shutdownServer":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_shutdownServer.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_shutdownServer_INPROGRESS(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}shutdownServer":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_shutdownServer_INPROGRESS.xml')
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_resetServer(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}resetServer":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_resetServer.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_powerOffServer(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}powerOffServer":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_powerOffServer.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_powerOffServer_INPROGRESS(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}powerOffServer":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_powerOffServer_INPROGRESS.xml')
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_networkDomain(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_networkDomain.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_vlan(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_vlan.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployServer(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}deployServer":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployServer.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deployNetworkDomain(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}deployNetworkDomain":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deployNetworkDomain.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_networkDomain_8cdfd607_f429_4df6_9352_162cfc0891be(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_networkDomain_8cdfd607_f429_4df6_9352_162cfc0891be.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_editNetworkDomain(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}editNetworkDomain":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_editNetworkDomain.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteNetworkDomain(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}deleteNetworkDomain":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteNetworkDomain.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deployVlan(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}deployVlan":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deployVlan.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_vlan_0e56433f_d808_4669_821d_812769517ff8(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_vlan_0e56433f_d808_4669_821d_812769517ff8.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_editVlan(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}editVlan":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_editVlan.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteVlan(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}deleteVlan":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteVlan.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_expandVlan(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}expandVlan":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_expandVlan.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_addPublicIpBlock(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}addPublicIpBlock":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_addPublicIpBlock.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_publicIpBlock_4487241a_f0ca_11e3_9315_d4bed9b167ba(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_publicIpBlock_4487241a_f0ca_11e3_9315_d4bed9b167ba.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_publicIpBlock(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_publicIpBlock.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_publicIpBlock_9945dc4a_bdce_11e4_8c14_b8ca3a5d9ef8(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_publicIpBlock_9945dc4a_bdce_11e4_8c14_b8ca3a5d9ef8.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_removePublicIpBlock(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}removePublicIpBlock":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_removePublicIpBlock.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_firewallRule(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_firewallRule.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_createFirewallRule(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}createFirewallRule":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_createFirewallRule.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_firewallRule_d0a20f59_77b9_4f28_a63b_e58496b73a6c(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_firewallRule_d0a20f59_77b9_4f28_a63b_e58496b73a6c.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_editFirewallRule(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}editFirewallRule":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_editFirewallRule.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteFirewallRule(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}deleteFirewallRule":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteFirewallRule.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_createNatRule(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}createNatRule":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_createNatRule.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_natRule(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_natRule.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_natRule_2187a636_7ebb_49a1_a2ff_5d617f496dce(self, method, url, body, headers):
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_natRule_2187a636_7ebb_49a1_a2ff_5d617f496dce.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteNatRule(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}deleteNatRule":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteNatRule.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_addNic(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}addNic":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_addNic.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_removeNic(self, method, url, body, headers):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}removeNic":
raise InvalidRequestError(request.tag)
body = self.fixtures.load(
'caas_2_0_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_removeNic.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
# Run this module's unittest suite when executed directly.
if __name__ == '__main__':
    sys.exit(unittest.main())
| |
#!/usr/bin/env python
import json
import netaddr
import os
import random
import re
import requests
import sys
import base64
from azure.storage.blob import AppendBlobService
from azure.storage.table import TableService
import azure.mgmt.network
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.network import NetworkManagementClient, NetworkManagementClientConfiguration
def prepare_storage(settings):
    """Create the Azure blob containers and table required by BOSH.

    Uses the default storage account credentials from *settings*.
    """
    account = settings["DEFAULT_STORAGE_ACCOUNT_NAME"]
    access_key = settings["STORAGE_ACCESS_KEY"]
    host_base = settings["SERVICE_HOST_BASE"]
    blob_service = AppendBlobService(account_name=account, account_key=access_key, endpoint_suffix=host_base)
    blob_service.create_container('bosh')
    # Stemcell blobs must be publicly readable.
    blob_service.create_container(
        container_name='stemcell',
        public_access='blob'
    )
    # Table holding metadata of the storage account and uploaded stemcells.
    table_service = TableService(account_name=account, account_key=access_key, endpoint_suffix=host_base)
    table_service.create_table('stemcells')
def render_bosh_manifest(settings):
    """Render bosh.yml in place from *settings*; return the director's IP.

    REPLACE_WITH_<NAME> placeholders are substituted with settings values,
    the SSH public key from bosh.pub, NTP servers for the environment, and
    the gateway/director IPs derived from the BOSH subnet.
    """
    with open('bosh.pub', 'r') as tmpfile:
        ssh_public_key = tmpfile.read().strip()
    # Azure convention: ip[1] is the subnet gateway; ip[4] is the first VM slot.
    ip = netaddr.IPNetwork(settings['SUBNET_ADDRESS_RANGE_FOR_BOSH'])
    gateway_ip = str(ip[1])
    bosh_director_ip = str(ip[4])
    ntp_servers_maps = {
        "AzureCloud": "0.north-america.pool.ntp.org",
        "AzureChinaCloud": "1.cn.pool.ntp.org, 1.asia.pool.ntp.org, 0.asia.pool.ntp.org"
    }
    ntp_servers = ntp_servers_maps[settings["ENVIRONMENT"]]
    # Render the manifest for bosh-init
    bosh_template = 'bosh.yml'
    if os.path.exists(bosh_template):
        with open(bosh_template, 'r') as tmpfile:
            contents = tmpfile.read()
        keys = [
            "SUBNET_ADDRESS_RANGE_FOR_BOSH",
            "SECONDARY_DNS",
            "VNET_NAME",
            "SUBNET_NAME_FOR_BOSH",
            "DNS_RECURSOR",
            "SUBSCRIPTION_ID",
            "DEFAULT_STORAGE_ACCOUNT_NAME",
            "RESOURCE_GROUP_NAME",
            "KEEP_UNREACHABLE_VMS",
            "TENANT_ID",
            "CLIENT_ID",
            "CLIENT_SECRET",
            "BOSH_PUBLIC_IP",
            "NSG_NAME_FOR_BOSH",
            "BOSH_RELEASE_URL",
            "BOSH_RELEASE_SHA1",
            "BOSH_AZURE_CPI_RELEASE_URL",
            "BOSH_AZURE_CPI_RELEASE_SHA1",
            "STEMCELL_URL",
            "STEMCELL_SHA1",
            "ENVIRONMENT"
        ]
        # Bug fix: str.replace performs a *literal* substitution. The previous
        # re.compile(re.escape(...)).sub(value, ...) form interpreted backslash
        # escapes (e.g. "\g<1>") inside the replacement value, which corrupts
        # or crashes the render for values containing backslashes.
        for k in keys:
            contents = contents.replace("REPLACE_WITH_{0}".format(k), str(settings[k]))
        contents = contents.replace("REPLACE_WITH_SSH_PUBLIC_KEY", ssh_public_key)
        contents = contents.replace("REPLACE_WITH_GATEWAY_IP", gateway_ip)
        contents = contents.replace("REPLACE_WITH_BOSH_DIRECTOR_IP", bosh_director_ip)
        contents = contents.replace("REPLACE_WITH_NTP_SERVERS", ntp_servers)
        with open(bosh_template, 'w') as tmpfile:
            tmpfile.write(contents)
    return bosh_director_ip
def get_cloud_foundry_configuration(scenario, settings, bosh_director_ip):
    """Build the placeholder -> value map used to render a CF manifest.

    *scenario* is "single-vm-cf" or "multiple-vm-cf"; the job IPs assigned
    below depend on it.
    """
    config = {key: settings[key] for key in [
        "SUBNET_ADDRESS_RANGE_FOR_CLOUD_FOUNDRY",
        "VNET_NAME",
        "SUBNET_NAME_FOR_CLOUD_FOUNDRY",
        "CLOUD_FOUNDRY_PUBLIC_IP",
        "NSG_NAME_FOR_CLOUD_FOUNDRY"
    ]}
    # AzureCloud resolves through the platform DNS plus the secondary DNS;
    # AzureChinaCloud resolves through the BOSH director instead.
    dns_maps = {
        "AzureCloud": "168.63.129.16, {0}".format(settings["SECONDARY_DNS"]),
        "AzureChinaCloud": bosh_director_ip
    }
    config["DNS"] = dns_maps[settings["ENVIRONMENT"]]
    with open('cloudfoundry.cert', 'r') as cert_file:
        ssl_cert = cert_file.read()
    with open('cloudfoundry.key', 'r') as key_file:
        ssl_key = key_file.read()
    # Indent every line of the PEM blob by 8 spaces so it nests in the YAML.
    pem = "{0}{1}".format(ssl_cert, ssl_key)
    config["SSL_CERT_AND_KEY"] = ("\n" + " " * 8).join(pem.split('\n'))
    ip = netaddr.IPNetwork(settings['SUBNET_ADDRESS_RANGE_FOR_CLOUD_FOUNDRY'])
    config["GATEWAY_IP"] = str(ip[1])
    config["RESERVED_IP_FROM"] = str(ip[2])
    config["RESERVED_IP_TO"] = str(ip[3])
    config["CLOUD_FOUNDRY_INTERNAL_IP"] = str(ip[4])
    config["SYSTEM_DOMAIN"] = "{0}.xip.io".format(settings["CLOUD_FOUNDRY_PUBLIC_IP"])
    if scenario == "single-vm-cf":
        config["STATIC_IP_FROM"] = str(ip[4])
        config["STATIC_IP_TO"] = str(ip[100])
        config["POSTGRES_IP"] = str(ip[11])
    elif scenario == "multiple-vm-cf":
        config["STATIC_IP_FROM"] = str(ip[4])
        config["STATIC_IP_TO"] = str(ip[100])
        config["HAPROXY_IP"] = str(ip[4])
        config["POSTGRES_IP"] = str(ip[11])
        config["ROUTER_IP"] = str(ip[12])
        config["NATS_IP"] = str(ip[13])
        config["ETCD_IP"] = str(ip[14])
        config["NFS_IP"] = str(ip[15])
        config["CONSUL_IP"] = str(ip[16])
    return config
def render_cloud_foundry_manifest(settings, bosh_director_ip):
    """Render the single-vm and multiple-vm Cloud Foundry manifests in place."""
    for scenario in ["single-vm-cf", "multiple-vm-cf"]:
        cloudfoundry_template = "{0}.yml".format(scenario)
        if not os.path.exists(cloudfoundry_template):
            continue
        with open(cloudfoundry_template, 'r') as tmpfile:
            contents = tmpfile.read()
        config = get_cloud_foundry_configuration(scenario, settings, bosh_director_ip)
        for key, value in config.items():
            # Bug fix: literal substitution via str.replace. The previous
            # re.sub call interpreted backslash escapes inside the value,
            # which is unsafe here since the SSL cert/key blob is substituted.
            contents = contents.replace("REPLACE_WITH_{0}".format(key), value)
        with open(cloudfoundry_template, 'w') as tmpfile:
            tmpfile.write(contents)
def render_bosh_deployment_cmd(bosh_director_ip):
    """Inject the BOSH director IP into deploy_bosh.sh, if the script exists."""
    bosh_deployment_cmd = "deploy_bosh.sh"
    if os.path.exists(bosh_deployment_cmd):
        with open(bosh_deployment_cmd, 'r') as tmpfile:
            contents = tmpfile.read()
        # Bug fix: str.replace does a literal substitution; the previous re.sub
        # call interpreted backslash escapes in the replacement value.
        # NOTE(review): the placeholder spelling "DIRECOT" matches the existing
        # typo in the deploy_bosh.sh template -- keep both in sync if corrected.
        contents = contents.replace("REPLACE_WITH_BOSH_DIRECOT_IP", bosh_director_ip)
        with open(bosh_deployment_cmd, 'w') as tmpfile:
            tmpfile.write(contents)
def render_cloud_foundry_deployment_cmd(settings):
    """Fill release/stemcell URLs and SHA1s into deploy_cloudfoundry.sh, if present."""
    cloudfoundry_deployment_cmd = "deploy_cloudfoundry.sh"
    if os.path.exists(cloudfoundry_deployment_cmd):
        with open(cloudfoundry_deployment_cmd, 'r') as tmpfile:
            contents = tmpfile.read()
        keys = [
            "STEMCELL_URL",
            "STEMCELL_SHA1",
            "CF_RELEASE_URL",
            "CF_RELEASE_SHA1",
            "DIEGO_RELEASE_URL",
            "DIEGO_RELEASE_SHA1",
            "GARDEN_RELEASE_URL",
            "GARDEN_RELEASE_SHA1",
            "CFLINUXFS2_RELEASE_URL",
            "CFLINUXFS2_RELEASE_SHA1"
        ]
        # Bug fix: str.replace does a literal substitution; the previous re.sub
        # call interpreted backslash escapes in the replacement value.
        for key in keys:
            contents = contents.replace("REPLACE_WITH_{0}".format(key), settings[key])
        with open(cloudfoundry_deployment_cmd, 'w') as tmpfile:
            tmpfile.write(contents)
def get_settings():
    """Build the settings dict from the JSON config file plus CLI credentials.

    Expects argv: [1] tenant id, [2] client id, [3] base64-encoded client
    secret, [4] path to the JSON settings file.
    """
    config_file = sys.argv[4]
    with open(config_file) as f:
        settings = json.load(f)
    settings['TENANT_ID'] = sys.argv[1]
    settings['CLIENT_ID'] = sys.argv[2]
    settings['CLIENT_SECRET'] = base64.b64decode(sys.argv[3])
    # Single-argument print() calls work identically on Python 2 and 3,
    # unlike the former Python-2-only print statements.
    # Only log redacted identifiers -- never the secret itself.
    print("tenant_id: {0}xxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx".format(settings['TENANT_ID'][0:4]))
    print("client_id: {0}xxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx".format(settings['CLIENT_ID'][0:4]))
    print("The length of client_secret is {0}".format(len(settings['CLIENT_SECRET'])))
    return settings
def main():
    """Entry point: persist settings, then render every manifest and script."""
    settings = get_settings()
    # Keep a pretty-printed copy of the effective settings for debugging.
    with open('settings', "w") as handle:
        handle.write(json.dumps(settings, indent=4, sort_keys=True))
    prepare_storage(settings)
    director_ip = render_bosh_manifest(settings)
    render_bosh_deployment_cmd(director_ip)
    render_cloud_foundry_manifest(settings, director_ip)
    render_cloud_foundry_deployment_cmd(settings)
# Script entry point; argv parsing happens inside get_settings().
if __name__ == "__main__":
    main()
| |
import logging
import sqlite3
from logging import Logger
from sqlite3 import Connection
from typing import Optional
from slack_sdk.oauth.installation_store.async_installation_store import (
AsyncInstallationStore,
)
from slack_sdk.oauth.installation_store.installation_store import InstallationStore
from slack_sdk.oauth.installation_store.models.bot import Bot
from slack_sdk.oauth.installation_store.models.installation import Installation
class SQLite3InstallationStore(InstallationStore, AsyncInstallationStore):
def __init__(
self,
*,
database: str,
client_id: str,
logger: Logger = logging.getLogger(__name__),
):
self.database = database
self.client_id = client_id
self.init_called = False
self._logger = logger
@property
def logger(self) -> Logger:
if self._logger is None:
self._logger = logging.getLogger(__name__)
return self._logger
def init(self):
try:
with sqlite3.connect(database=self.database) as conn:
cur = conn.execute("select count(1) from slack_installations;")
row_num = cur.fetchone()[0]
self.logger.debug(
f"{row_num} installations are stored in {self.database}"
)
except Exception: # skipcq: PYL-W0703
self.create_tables()
self.init_called = True
def connect(self) -> Connection:
if not self.init_called:
self.init()
return sqlite3.connect(database=self.database)
    def create_tables(self):
        """Create the slack_installations / slack_bots tables and their lookup indexes.

        Rows are append-only; readers pick the newest row by installed_at.
        """
        with sqlite3.connect(database=self.database) as conn:
            conn.execute(
                """
                create table slack_installations (
                    id integer primary key autoincrement,
                    client_id text not null,
                    app_id text not null,
                    enterprise_id text not null default '',
                    enterprise_name text,
                    enterprise_url text,
                    team_id text not null default '',
                    team_name text,
                    bot_token text not null,
                    bot_id text not null,
                    bot_user_id text not null,
                    bot_scopes text,
                    bot_refresh_token text, -- since v3.8
                    bot_token_expires_at datetime, -- since v3.8
                    user_id text not null,
                    user_token text,
                    user_scopes text,
                    user_refresh_token text, -- since v3.8
                    user_token_expires_at datetime, -- since v3.8
                    incoming_webhook_url text,
                    incoming_webhook_channel text,
                    incoming_webhook_channel_id text,
                    incoming_webhook_configuration_url text,
                    is_enterprise_install boolean not null default 0,
                    token_type text,
                    installed_at datetime not null default current_timestamp
                );
                """
            )
            # Index matches the where/order-by clauses of the find_* queries.
            conn.execute(
                """
                create index slack_installations_idx on slack_installations (
                    client_id,
                    enterprise_id,
                    team_id,
                    user_id,
                    installed_at
                );
                """
            )
            conn.execute(
                """
                create table slack_bots (
                    id integer primary key autoincrement,
                    client_id text not null,
                    app_id text not null,
                    enterprise_id text not null default '',
                    enterprise_name text,
                    team_id text not null default '',
                    team_name text,
                    bot_token text not null,
                    bot_id text not null,
                    bot_user_id text not null,
                    bot_scopes text,
                    bot_refresh_token text, -- since v3.8
                    bot_token_expires_at datetime, -- since v3.8
                    is_enterprise_install boolean not null default 0,
                    installed_at datetime not null default current_timestamp
                );
                """
            )
            conn.execute(
                """
                create index slack_bots_idx on slack_bots (
                    client_id,
                    enterprise_id,
                    team_id,
                    installed_at
                );
                """
            )
            self.logger.debug(f"Tables have been created (database: {self.database})")
            conn.commit()
    async def async_save(self, installation: Installation):
        """Async facade: delegates to the synchronous save()."""
        return self.save(installation)

    async def async_save_bot(self, bot: Bot):
        """Async facade: delegates to the synchronous save_bot()."""
        return self.save_bot(bot)
    def save(self, installation: Installation):
        """Insert a full installation row, then derive and save the bot row.

        Rows are append-only; lookups select the newest row by installed_at.
        """
        with self.connect() as conn:
            conn.execute(
                """
                insert into slack_installations (
                    client_id,
                    app_id,
                    enterprise_id,
                    enterprise_name,
                    enterprise_url,
                    team_id,
                    team_name,
                    bot_token,
                    bot_id,
                    bot_user_id,
                    bot_scopes,
                    bot_refresh_token, -- since v3.8
                    bot_token_expires_at, -- since v3.8
                    user_id,
                    user_token,
                    user_scopes,
                    user_refresh_token, -- since v3.8
                    user_token_expires_at, -- since v3.8
                    incoming_webhook_url,
                    incoming_webhook_channel,
                    incoming_webhook_channel_id,
                    incoming_webhook_configuration_url,
                    is_enterprise_install,
                    token_type
                )
                values
                (
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?
                );
                """,
                [
                    self.client_id,
                    installation.app_id,
                    # Empty string (not NULL) for non-enterprise installs,
                    # matching the columns' "not null default ''" schema.
                    installation.enterprise_id or "",
                    installation.enterprise_name,
                    installation.enterprise_url,
                    installation.team_id or "",
                    installation.team_name,
                    installation.bot_token,
                    installation.bot_id,
                    installation.bot_user_id,
                    # NOTE(review): bot_scopes is joined unguarded (unlike
                    # user_scopes below) -- assumes it is always a sequence.
                    ",".join(installation.bot_scopes),
                    installation.bot_refresh_token,
                    installation.bot_token_expires_at,
                    installation.user_id,
                    installation.user_token,
                    ",".join(installation.user_scopes)
                    if installation.user_scopes
                    else None,
                    installation.user_refresh_token,
                    installation.user_token_expires_at,
                    installation.incoming_webhook_url,
                    installation.incoming_webhook_channel,
                    installation.incoming_webhook_channel_id,
                    installation.incoming_webhook_configuration_url,
                    1 if installation.is_enterprise_install else 0,
                    installation.token_type,
                ],
            )
            self.logger.debug(
                f"New rows in slack_bots and slack_installations have been created (database: {self.database})"
            )
            conn.commit()
        self.save_bot(installation.to_bot())
    def save_bot(self, bot: Bot):
        """Insert a bot row (append-only; find_bot selects the newest by installed_at)."""
        with self.connect() as conn:
            conn.execute(
                """
                insert into slack_bots (
                    client_id,
                    app_id,
                    enterprise_id,
                    enterprise_name,
                    team_id,
                    team_name,
                    bot_token,
                    bot_id,
                    bot_user_id,
                    bot_scopes,
                    bot_refresh_token, -- since v3.8
                    bot_token_expires_at, -- since v3.8
                    is_enterprise_install
                )
                values
                (
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?,
                    ?
                );
                """,
                [
                    self.client_id,
                    bot.app_id,
                    # Empty string (not NULL) keeps lookups by "" working
                    # for non-enterprise / single-workspace installs.
                    bot.enterprise_id or "",
                    bot.enterprise_name,
                    bot.team_id or "",
                    bot.team_name,
                    bot.bot_token,
                    bot.bot_id,
                    bot.bot_user_id,
                    ",".join(bot.bot_scopes),
                    bot.bot_refresh_token,
                    bot.bot_token_expires_at,
                    bot.is_enterprise_install,
                ],
            )
            conn.commit()
    async def async_find_bot(
        self,
        *,
        enterprise_id: Optional[str],
        team_id: Optional[str],
        is_enterprise_install: Optional[bool] = False,
    ) -> Optional[Bot]:
        """Async facade: delegates to the synchronous find_bot()."""
        return self.find_bot(
            enterprise_id=enterprise_id,
            team_id=team_id,
            is_enterprise_install=is_enterprise_install,
        )
    def find_bot(
        self,
        *,
        enterprise_id: Optional[str],
        team_id: Optional[str],
        is_enterprise_install: Optional[bool] = False,
    ) -> Optional[Bot]:
        """Return the most recently installed Bot for the workspace, or None.

        For org-wide (enterprise) installs team_id is normalized to "" so the
        lookup matches the row written by save_bot(). Any error is logged and
        swallowed; None is returned.
        """
        if is_enterprise_install or team_id is None:
            team_id = ""
        try:
            with self.connect() as conn:
                cur = conn.execute(
                    """
                    select
                        app_id,
                        enterprise_id,
                        enterprise_name,
                        team_id,
                        team_name,
                        bot_token,
                        bot_id,
                        bot_user_id,
                        bot_scopes,
                        bot_refresh_token, -- since v3.8
                        bot_token_expires_at, -- since v3.8
                        is_enterprise_install,
                        installed_at
                    from
                        slack_bots
                    where
                        client_id = ?
                        and
                        enterprise_id = ?
                        and
                        team_id = ?
                    order by installed_at desc
                    limit 1
                    """,
                    [self.client_id, enterprise_id or "", team_id or ""],
                )
                row = cur.fetchone()
                result = "found" if row and len(row) > 0 else "not found"
                self.logger.debug(
                    f"find_bot's query result: {result} (database: {self.database})"
                )
                if row and len(row) > 0:
                    # Row indices must stay in sync with the select column order.
                    bot = Bot(
                        app_id=row[0],
                        enterprise_id=row[1],
                        enterprise_name=row[2],
                        team_id=row[3],
                        team_name=row[4],
                        bot_token=row[5],
                        bot_id=row[6],
                        bot_user_id=row[7],
                        bot_scopes=row[8],
                        bot_refresh_token=row[9],
                        bot_token_expires_at=row[10],
                        is_enterprise_install=row[11],
                        installed_at=row[12],
                    )
                    return bot
                return None
        except Exception as e:  # skipcq: PYL-W0703
            message = f"Failed to find bot installation data for enterprise: {enterprise_id}, team: {team_id}: {e}"
            if self.logger.level <= logging.DEBUG:
                self.logger.exception(message)
            else:
                self.logger.warning(message)
            return None
    async def async_find_installation(
        self,
        *,
        enterprise_id: Optional[str],
        team_id: Optional[str],
        user_id: Optional[str] = None,
        is_enterprise_install: Optional[bool] = False,
    ) -> Optional[Installation]:
        """Async facade: delegates to the synchronous find_installation()."""
        return self.find_installation(
            enterprise_id=enterprise_id,
            team_id=team_id,
            user_id=user_id,
            is_enterprise_install=is_enterprise_install,
        )
    def find_installation(
        self,
        *,
        enterprise_id: Optional[str],
        team_id: Optional[str],
        user_id: Optional[str] = None,
        is_enterprise_install: Optional[bool] = False,
    ) -> Optional[Installation]:
        """Return the newest matching Installation, or None.

        When user_id is given, only that user's installation rows match;
        otherwise the newest row for the workspace wins. For org-wide installs
        team_id is normalized to "". Errors are logged and None is returned.
        """
        if is_enterprise_install or team_id is None:
            team_id = ""
        try:
            with self.connect() as conn:
                row = None
                # Shared column list: both queries below must keep this order,
                # and the row indices further down depend on it.
                columns = """
                app_id,
                enterprise_id,
                enterprise_name,
                enterprise_url,
                team_id,
                team_name,
                bot_token,
                bot_id,
                bot_user_id,
                bot_scopes,
                bot_refresh_token, -- since v3.8
                bot_token_expires_at, -- since v3.8
                user_id,
                user_token,
                user_scopes,
                user_refresh_token, -- since v3.8
                user_token_expires_at, -- since v3.8
                incoming_webhook_url,
                incoming_webhook_channel,
                incoming_webhook_channel_id,
                incoming_webhook_configuration_url,
                is_enterprise_install,
                token_type,
                installed_at
                """
                if user_id is None:
                    cur = conn.execute(
                        f"""
                        select
                        {columns}
                        from
                            slack_installations
                        where
                            client_id = ?
                            and
                            enterprise_id = ?
                            and
                            team_id = ?
                        order by installed_at desc
                        limit 1
                        """,
                        [self.client_id, enterprise_id or "", team_id],
                    )
                    row = cur.fetchone()
                else:
                    cur = conn.execute(
                        f"""
                        select
                        {columns}
                        from
                            slack_installations
                        where
                            client_id = ?
                            and
                            enterprise_id = ?
                            and
                            team_id = ?
                            and
                            user_id = ?
                        order by installed_at desc
                        limit 1
                        """,
                        [self.client_id, enterprise_id or "", team_id, user_id],
                    )
                    row = cur.fetchone()
                if row is None:
                    return None
                result = "found" if row and len(row) > 0 else "not found"
                self.logger.debug(
                    f"find_installation's query result: {result} (database: {self.database})"
                )
                if row and len(row) > 0:
                    # Row indices must stay in sync with the columns string above.
                    installation = Installation(
                        app_id=row[0],
                        enterprise_id=row[1],
                        enterprise_name=row[2],
                        enterprise_url=row[3],
                        team_id=row[4],
                        team_name=row[5],
                        bot_token=row[6],
                        bot_id=row[7],
                        bot_user_id=row[8],
                        bot_scopes=row[9],
                        bot_refresh_token=row[10],
                        bot_token_expires_at=row[11],
                        user_id=row[12],
                        user_token=row[13],
                        user_scopes=row[14],
                        user_refresh_token=row[15],
                        user_token_expires_at=row[16],
                        incoming_webhook_url=row[17],
                        incoming_webhook_channel=row[18],
                        incoming_webhook_channel_id=row[19],
                        incoming_webhook_configuration_url=row[20],
                        is_enterprise_install=row[21],
                        token_type=row[22],
                        installed_at=row[23],
                    )
                    return installation
                return None
        except Exception as e:  # skipcq: PYL-W0703
            message = f"Failed to find an installation data for enterprise: {enterprise_id}, team: {team_id}: {e}"
            if self.logger.level <= logging.DEBUG:
                self.logger.exception(message)
            else:
                self.logger.warning(message)
            return None
    def delete_bot(
        self, *, enterprise_id: Optional[str], team_id: Optional[str]
    ) -> None:
        """Delete every bot row for the given workspace; errors are logged, not raised."""
        try:
            with self.connect() as conn:
                conn.execute(
                    """
                    delete
                    from
                        slack_bots
                    where
                        client_id = ?
                        and
                        enterprise_id = ?
                        and
                        team_id = ?
                    """,
                    [self.client_id, enterprise_id or "", team_id or ""],
                )
                conn.commit()
        except Exception as e:  # skipcq: PYL-W0703
            message = f"Failed to delete bot installation data for enterprise: {enterprise_id}, team: {team_id}: {e}"
            if self.logger.level <= logging.DEBUG:
                self.logger.exception(message)
            else:
                self.logger.warning(message)
    def delete_installation(
        self,
        *,
        enterprise_id: Optional[str],
        team_id: Optional[str],
        user_id: Optional[str] = None,
    ) -> None:
        """Delete installation rows for the workspace (optionally one user only).

        Errors are logged at debug/warning level and swallowed.
        """
        try:
            with self.connect() as conn:
                if user_id is None:
                    # No user filter: remove every installation for the workspace.
                    conn.execute(
                        """
                        delete
                        from
                            slack_installations
                        where
                            client_id = ?
                            and
                            enterprise_id = ?
                            and
                            team_id = ?
                        """,
                        [self.client_id, enterprise_id or "", team_id],
                    )
                else:
                    conn.execute(
                        """
                        delete
                        from
                            slack_installations
                        where
                            client_id = ?
                            and
                            enterprise_id = ?
                            and
                            team_id = ?
                            and
                            user_id = ?
                        """,
                        [self.client_id, enterprise_id or "", team_id, user_id],
                    )
                conn.commit()
        except Exception as e:  # skipcq: PYL-W0703
            message = f"Failed to delete installation data for enterprise: {enterprise_id}, team: {team_id}: {e}"
            if self.logger.level <= logging.DEBUG:
                self.logger.exception(message)
            else:
                self.logger.warning(message)
| |
from pygame.locals import *
import pygame
import math
import json
import socket
from get_ip import check_for_internet_conection as get_ip
import sys
from time import sleep, localtime
from weakref import WeakKeyDictionary
from PodSixNet.Server import Server
from PodSixNet.Channel import Channel
from settings import *
class ClientChannel(Channel):
    """Per-client network channel; forwards incoming events to the owning server."""

    def __init__(self, *args, **kwargs):
        Channel.__init__(self, *args, **kwargs)

    def Close(self):
        # A disconnect ends the current game and removes this player.
        self._server.game_over = True
        self._server.DelPlayer(self)

    def Network_print_game_state(self, data):
        """Advance the simulation one tick, then broadcast the whole game state."""
        self._server.update(self)
        ball = self._server.ball
        self._server.SendToAll({
            'action': 'game_state',
            'get_json': self._server.get_json(),
            'score': self._server.score,
            'newimg_angle': ball.angle,
            'ball_rect_x': ball.rect.x,
            'ball_rect_y': ball.rect.y,
            'ball_rect_h': ball.rect.height,
            'ball_rect_w': ball.rect.width,
            'highscore': self._server.highscore,
        })

    def Network_mouse_pos(self, data):
        # Only players with write permission may move the shared pointer.
        if self._server.player_can_write(self):
            self._server.pointer.rect.x = data['x']
            self._server.pointer.rect.y = data['y']
class Ball(pygame.sprite.Sprite):
    """The ball sprite: gravity/friction physics with spin-driven rotation."""

    def __init__(self, x, y, image_name='ball.png'):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load(IMAGES_PATH + image_name)
        # NOTE(review): the second set_colorkey replaces the first, so only
        # pure black ends up transparent.
        self.image.set_colorkey(self.image.get_at((0, 0)))
        self.image.set_colorkey((0, 0, 0))
        self.rect = self.image.get_rect()
        self.mask = pygame.mask.from_surface(self.image)
        # Physics constants come from settings.py.
        self.gravity = GRAVITY
        self.cap = CAP
        self.bounce = BOUNCE
        self.friction = FRICTION
        self.kick = KICK
        # Position is tracked as floats (x, y); rect mirrors them for drawing.
        self.x = x
        self.y = y
        self.dx = 0
        self.dy = 0
        self.on_ground = False
        self.spin = 0
        self.angle = 0
        self.center()

    def center(self):
        """Snap the ball to the screen centre and sync the float coords to the rect."""
        self.rect.centerx, self.rect.centery = CENTER
        self.x = self.rect.x
        self.y = self.rect.y

    def update(self):
        # Integrate position at half the velocity per frame.
        self.x += self.dx / 2
        self.y += self.dy / 2
        # Advance rotation and keep the angle within [0, 360].
        self.angle += self.spin
        if self.angle < 0:
            self.angle += 360
        if self.angle > 360:
            self.angle -= 360
        if self.on_ground:
            # Rolling: friction slows the ball; spin tracks horizontal speed.
            self.dx *= self.friction
            self.spin = -self.dx
            if abs(self.dx) < 0.5:
                # Below this threshold the ball stops (and so does the spin).
                self.dx = 0
                self.spin = -self.dx
        else:
            # Airborne: apply gravity, clamped at terminal velocity.
            self.dy += self.gravity
            if self.dy > self.cap:
                self.dy = self.cap
        self.rect.x = self.x
        self.rect.y = self.y
class Pointer(pygame.sprite.Sprite):
    """1x1-pixel sprite tracking the mouse, used for pixel-perfect hit tests."""

    def __init__(self):
        # Bug fix: the Sprite base class was never initialized, which breaks
        # membership in pygame sprite Groups (Ball does call it; be consistent).
        pygame.sprite.Sprite.__init__(self)
        # Seed the rect at the current mouse position (one query, not two).
        x, y = pygame.mouse.get_pos()
        self.rect = pygame.Rect(x, y, 1, 1)
        # A single opaque pixel so collide_mask can do exact overlap tests.
        self.mask = pygame.Mask((1, 1))
        self.mask.set_at((0, 0), 1)
class Game(Server):
    """PodSixNet game server: owns the ball physics, score and tries."""

    # Factory used by PodSixNet for each accepted connection.
    channelClass = ClientChannel

    def player_can_write(self, channel):
        # Turn-taking hook; currently every connected player may move the
        # pointer.
        return True

    def Connected(self, channel, addr):
        # Accept at most two players; later connections are ignored.
        if self.current_index < 2:
            self.AddPlayer(channel)

    def DelPlayer(self, player):
        # NOTE(review): marks the player inactive instead of removing the
        # key; SendToAll still iterates over it — confirm this is intended.
        self.players[player] = False

    def AddPlayer(self, player):
        # Register the channel and remember its join order.
        self.players[player] = True
        self.players_order[player] = self.current_index
        self.current_index += 1

    def SendToAll(self, data):
        # Broadcast `data` to every known channel.
        [p.Send(data) for p in self.players]

    def __init__(self, *args, **kwargs):
        pygame.init()
        Server.__init__(self, *args, **kwargs)
        # Weak keys let closed channels be garbage collected automatically.
        self.players = WeakKeyDictionary()
        self.players_order = WeakKeyDictionary()
        self.current_index = 0
        self.ball = Ball(*CENTER)
        self.pointer = Pointer()
        self.score = 0
        self.difficulty = 3
        self.highscore = 0
        self.paused = False
        # Sub-rectangle of the ball image; presumably used by clients when
        # rotating the sprite — TODO confirm against the client code.
        self.subrect = self.ball.image.get_rect()
        self.subrect.width = 84
        self.subrect.height = 83
        self.newimg = self.ball.image
        self.tries = TRIES
        self.game_over = False
        print('Server launched')

    def check_for_collision(self):
        # Pixel-perfect test between the mouse pointer and the ball; a hit
        # kicks the ball away from the contact point and scores a point.
        if pygame.sprite.collide_mask(
                self.pointer,
                self.ball) and not self.paused:
            if self.ball.mask.get_at(
                    (int(self.pointer.rect.x - self.ball.x), int(self.pointer.rect.y - self.ball.y))):
                hit = self.ball.mask.overlap(self.pointer.mask, (int(
                    self.pointer.rect.x - self.ball.x), int(self.pointer.rect.y - self.ball.y)))
                # Re-centre the hit point on the ball's midpoint so the kick
                # direction points away from the contact.
                hit = (
                    hit[0] -
                    self.ball.rect.width /
                    2,
                    hit[1] -
                    self.ball.rect.height /
                    2)
                angle = math.degrees(math.atan2(hit[0], hit[1]))
                dx = 30 * math.cos(math.radians(angle + 90))
                dy = 30 * math.sin(math.radians(angle - 90))
                self.ball.dx = dx
                self.ball.dy = dy
                self.ball.on_ground = False
                self.ball.spin = -dx / 5
                self.score += 1

    def update(self, player):
        # One server tick: collision, wall/floor bounces, scoring, and
        # game-over detection.
        self.check_for_collision()
        # Right wall bounce.
        if self.ball.x > WINDOWWIDTH - self.ball.rect.width:
            self.ball.x = WINDOWWIDTH - self.ball.rect.width
            self.ball.dx = -self.ball.dx * self.ball.friction
            self.ball.spin = self.ball.dy
        # Floor: dropping the ball costs a try and resets the score.
        if self.ball.y > WINDOWHEIGHT - self.ball.rect.height:
            if self.score > 0:
                self.tries -= 1
            self.score = 0
            self.ball.y = WINDOWHEIGHT - self.ball.rect.height
            if not self.ball.on_ground:
                self.ball.dx *= self.ball.friction
                self.ball.spin = -self.ball.dx
                if (self.ball.dy * self.ball.bounce) - 5 > 0:
                    self.ball.dy = -self.ball.dy * self.ball.bounce
                else:
                    # Too slow to keep bouncing: settle on the ground.
                    self.ball.dy = 0
                    self.ball.on_ground = True
        # Left wall bounce.
        if self.ball.x < 0:
            self.ball.x = 0
            self.ball.dx = -self.ball.dx * self.ball.bounce
            self.ball.spin = -self.ball.dy
        if self.score > self.highscore:
            self.highscore = self.score
        self.ball.update()
        print(self.tries)  # NOTE(review): looks like leftover debug output.
        if self.tries <= 0:
            # Out of tries: notify clients, drop the player and shut down.
            self.game_over = True
            self.SendToAll({'action': 'game_over',
                            'data': self.get_json(),
                            'highscore': self.highscore})
            self.DelPlayer(player)
            pygame.quit()

    def get_json(self):
        # Serialized scene description keyed by this server's id.
        return json.dumps(
            {'images': {str(id(self)): {'image': 'ball', 'x': self.ball.x, 'y': self.ball.y}}})

    def Launch(self):
        # Main loop: pump the network and briefly yield the CPU.
        while True:
            self.Pump()
            sleep(0.0001)
if __name__ == "__main__":
    # Bind the server to this machine's LAN address on the fixed game port.
    host = get_ip()
    # Fix: 31425 is already an int — the int(...) wrapper was redundant.
    s = Game(localaddr=(host, 31425))
    s.Launch()
| |
r"""
``$ mwpersistence diffs2persistence -h``
::
Generates token persistence information from JSON revision documents
annotated with diff information (see `mwdiffs dump2diffs|revdocs2diffs`).
This utility expects to be fed revision documents in as a page-partitioned
chronological sequence so that diffs can be computed upon in order.
This utility uses a processing 'window' to limit memory usage. New
revisions enter the head of the window and old revisions fall off the tail.
Stats are generated at the tail of the window.
::
window
.------+------.
revisions ========[=============]=============>
/ \
[tail] [head]
Usage:
diffs2persistence (-h|--help)
diffs2persistence [<input-file>...] --sunset=<date>
[--window=<revs>] [--revert-radius=<revs>]
[--keep-diff] [--threads=<num>] [--output=<path>]
[--compress=<type>] [--verbose] [--debug]
Options:
-h|--help Prints this documentation
<input-file>            The path to a file containing page-partitioned
JSON revision documents with a 'diff' field to
process.
--sunset=<date> The date of the database dump we are generating
from. This is used to apply a 'time visible'
statistic. Expects "%Y-%m-%dT%H:%M:%SZ".
[default: <now>]
--window=<revs> The size of the window of revisions from which
persistence data will be generated.
[default: 50]
--revert-radius=<revs> The number of revisions back that a revert can
reference. [default: 15]
--keep-diff Do not drop 'diff' field data from the json
blobs.
--threads=<num> If a collection of files are provided, how many
processor threads should be prepared?
[default: <cpu_count>]
--output=<path> Write output to a directory with one output
file per input path. [default: <stdout>]
--compress=<type> If set, output written to the output-dir will
be compressed in this format. [default: bz2]
--verbose Print dots and stuff to stderr
--debug Print debug logging to stderr.
"""
import logging
import sys
import time
from collections import deque
from itertools import groupby
import mwcli
import mwxml.utilities
from more_itertools import peekable
from mwtypes import Timestamp
from ..state import DiffState
logger = logging.getLogger(__name__)
def process_args(args):
    """Convert docopt CLI strings into diffs2persistence keyword arguments."""
    if args['--sunset'] == "<now>":
        sunset = Timestamp(time.time())
    else:
        sunset = Timestamp(args['--sunset'])
    return {
        'window_size': int(args['--window']),
        'revert_radius': int(args['--revert-radius']),
        'sunset': sunset,
        'keep_diff': bool(args['--keep-diff']),
    }
def _diffs2persistence(*args, keep_diff=False, **kwargs):
    """CLI wrapper: run diffs2persistence, optionally stripping 'diff' fields."""
    docs = diffs2persistence(*args, **kwargs)
    if bool(keep_diff):
        yield from docs
    else:
        yield from drop_diff(docs)
def drop_diff(rev_docs):
    """Strip the (potentially large) 'diff' field from each revision doc.

    Mutates the documents in place and yields them through.
    """
    for revision_doc in rev_docs:
        if 'diff' in revision_doc:
            del revision_doc['diff']
        yield revision_doc
def diffs2persistence(rev_docs, window_size=50, revert_radius=15, sunset=None,
                      verbose=False):
    """
    Processes a sorted and page-partitioned sequence of revision documents
    and adds a 'persistence' field to them containing statistics about how
    each token "added" in the revision persisted through future revisions.

    :Parameters:
        rev_docs : `iterable` ( `dict` )
            JSON documents of revision data containing a 'diff' field as
            generated by ``dump2diffs``.  It's assumed that rev_docs are
            partitioned by page and otherwise in chronological order.
        window_size : `int`
            The size of the window of revisions from which persistence data
            will be generated.
        revert_radius : `int`
            The number of revisions back that a revert can reference.
        sunset : :class:`mwtypes.Timestamp`
            The date of the database dump we are generating from.  This is
            used to apply a 'time visible' statistic.  If not set, now()
            will be assumed.  (Dropping of the 'diff' field is handled by
            the ``_diffs2persistence`` CLI wrapper, not here.)
        verbose : `bool`
            Prints out dots and stuff to stderr

    :Returns:
        A generator of rev_docs with a 'persistence' field containing
        statistics about individual tokens.
    """
    rev_docs = mwxml.utilities.normalize(rev_docs)
    window_size = int(window_size)
    revert_radius = int(revert_radius)
    sunset = Timestamp(sunset) if sunset is not None \
             else Timestamp(time.time())

    # Group the docs by page
    page_docs = groupby(rev_docs, key=lambda d: d['page']['title'])

    for page_title, rev_docs in page_docs:
        if verbose:
            sys.stderr.write(page_title + ": ")

        # We need a look-ahead to know how long this revision was visible
        rev_docs = peekable(rev_docs)

        # The window allows us to manage memory
        window = deque(maxlen=window_size)

        # The state does the actual processing work
        state = DiffState(revert_radius=revert_radius)

        while rev_docs:
            rev_doc = next(rev_docs)
            next_doc = rev_docs.peek(None)

            # A revision is "visible" until the next revision replaces it,
            # or until the sunset for the last revision of the page.
            if next_doc is not None:
                seconds_visible = Timestamp(next_doc['timestamp']) - \
                                  Timestamp(rev_doc['timestamp'])
            else:
                seconds_visible = sunset - Timestamp(rev_doc['timestamp'])

            if seconds_visible < 0:
                # Out-of-order timestamps: clamp rather than propagate a
                # negative duration.
                # Bug fix: Logger.warn() is a deprecated alias; use warning().
                logger.warning("Seconds visible {0} is less than zero."
                               .format(seconds_visible))
                seconds_visible = 0

            _, tokens_added, _ = \
                state.update_opdocs(rev_doc['sha1'], rev_doc['diff']['ops'],
                                    (rev_doc['user'], seconds_visible))

            if len(window) == window_size:
                # Window is full: emit stats for the revision falling off
                # the tail.
                old_doc, old_added = window[0]
                window.append((rev_doc, tokens_added))
                persistence = token_persistence(old_doc, old_added, window,
                                                None)
                old_doc['persistence'] = persistence
                yield old_doc
                if verbose:
                    sys.stderr.write(".")
                    sys.stderr.flush()
            else:
                window.append((rev_doc, tokens_added))

        # Drain the revisions remaining in the window at end-of-page.
        while len(window) > 0:
            old_doc, old_added = window.popleft()
            persistence = token_persistence(old_doc, old_added, window, sunset)
            old_doc['persistence'] = persistence
            yield old_doc
            if verbose:
                sys.stderr.write("_")
                sys.stderr.flush()

        if verbose:
            sys.stderr.write("\n")
def token_persistence(rev_doc, tokens_added, window, sunset):
    """Build the 'persistence' statistics dict for one revision.

    :Parameters:
        rev_doc : `dict`
            The revision document the statistics describe.
        tokens_added : `list`
            Tokens introduced by this revision; each has a `.revisions`
            history of (user, seconds_visible) pairs.
        window : `iterable` ( (`dict`, `list`) )
            The (doc, tokens_added) pairs the revision was compared against.
        sunset : :class:`mwtypes.Timestamp` | `None`
            End of the observation period; when None, the timestamp of the
            last revision in the window is used instead.
    """
    if sunset is None:
        # Use the last revision in the window
        sunset = Timestamp(window[-1][0]['timestamp'])

    # Clamp at zero in case of out-of-order timestamps.
    seconds_possible = max(sunset - Timestamp(rev_doc['timestamp']), 0)

    return {
        'revisions_processed': len(window),
        'non_self_processed': sum(rd['user'] != rev_doc['user']
                                  for rd, _ in window),
        'seconds_possible': seconds_possible,
        # Fix: list(...) instead of the redundant [td for td in ...] form.
        'tokens': list(generate_token_docs(rev_doc, tokens_added))
    }
def generate_token_docs(rev_doc, tokens_added):
    """Yield a per-token stats dict for every token added in `rev_doc`."""
    author = rev_doc['user']
    for token in tokens_added:
        revisions = token.revisions
        yield {
            "text": str(token),
            "persisted": len(revisions) - 1,
            "non_self_persisted": sum(user != author
                                      for user, _ in revisions),
            "seconds_visible": sum(seconds for _, seconds in revisions)
        }
# Wire the module docstring (docopt usage), the processor and the argument
# parser into an mwcli command-line streamer; `main` is the console entry
# point used by the `mwpersistence diffs2persistence` sub-command.
streamer = mwcli.Streamer(
    __doc__,
    __name__,
    _diffs2persistence,
    process_args
)
main = streamer.main
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hyperparameter values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import numbers
import re
import six
from tensorflow.contrib.training.python.training import hparam_pb2
from tensorflow.python.framework import ops
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
# Define the regular expression for parsing a single clause of the input
# (delimited by commas). A legal clause looks like:
# <variable name>[<index>]? = <rhs>
# where <rhs> is either a single token or [] enclosed list of tokens.
# For example: "var[1] = a" or "x = [1,2,3]"
PARAM_RE = re.compile(r"""
(?P<name>[a-zA-Z][\w\.]*) # variable name: "var" or "x"
(\[\s*(?P<index>\d+)\s*\])? # (optional) index: "1" or None
\s*=\s*
((?P<val>[^,\[]*) # single value: "a" or None
|
\[(?P<vals>[^\]]*)\]) # list of values: None or "1,2,3"
($|,\s*)""", re.VERBOSE)
def _parse_fail(name, var_type, value, values):
"""Helper function for raising a value error for bad assignment."""
raise ValueError(
'Could not parse hparam \'%s\' of type \'%s\' with value \'%s\' in %s' %
(name, var_type.__name__, value, values))
def _reuse_fail(name, values):
"""Helper function for raising a value error for reuse of name."""
raise ValueError('Multiple assignments to variable \'%s\' in %s' % (name,
values))
def _process_scalar_value(name, parse_fn, var_type, m_dict, values,
results_dictionary):
"""Update results_dictionary with a scalar value.
Used to update the results_dictionary to be returned by parse_values when
encountering a clause with a scalar RHS (e.g. "s=5" or "arr[0]=5".)
Mutates results_dictionary.
Args:
name: Name of variable in assignment ("s" or "arr").
parse_fn: Function for parsing the actual value.
var_type: Type of named variable.
m_dict: Dictionary constructed from regex parsing.
m_dict['val']: RHS value (scalar)
m_dict['index']: List index value (or None)
values: Full expression being parsed
results_dictionary: The dictionary being updated for return by the parsing
function.
Raises:
ValueError: If the name has already been used.
"""
try:
parsed_value = parse_fn(m_dict['val'])
except ValueError:
_parse_fail(name, var_type, m_dict['val'], values)
# If no index is provided
if not m_dict['index']:
if name in results_dictionary:
_reuse_fail(name, values)
results_dictionary[name] = parsed_value
else:
if name in results_dictionary:
# The name has already been used as a scalar, then it
# will be in this dictionary and map to a non-dictionary.
if not isinstance(results_dictionary.get(name), dict):
_reuse_fail(name, values)
else:
results_dictionary[name] = {}
index = int(m_dict['index'])
# Make sure the index position hasn't already been assigned a value.
if index in results_dictionary[name]:
_reuse_fail('{}[{}]'.format(name, index), values)
results_dictionary[name][index] = parsed_value
def _process_list_value(name, parse_fn, var_type, m_dict, values,
results_dictionary):
"""Update results_dictionary from a list of values.
Used to update results_dictionary to be returned by parse_values when
encountering a clause with a list RHS (e.g. "arr=[1,2,3]".)
Mutates results_dictionary.
Args:
name: Name of variable in assignment ("arr").
parse_fn: Function for parsing individual values.
var_type: Type of named variable.
m_dict: Dictionary constructed from regex parsing.
m_dict['val']: RHS value (scalar)
values: Full expression being parsed
results_dictionary: The dictionary being updated for return by the parsing
function.
Raises:
ValueError: If the name has an index or the values cannot be parsed.
"""
if m_dict['index'] is not None:
raise ValueError('Assignment of a list to a list index.')
elements = filter(None, re.split('[ ,]', m_dict['vals']))
# Make sure the name hasn't already been assigned a value
if name in results_dictionary:
raise _reuse_fail(name, values)
try:
results_dictionary[name] = [parse_fn(e) for e in elements]
except ValueError:
_parse_fail(name, var_type, m_dict['vals'], values)
def _cast_to_type_if_compatible(name, param_type, value):
"""Cast hparam to the provided type, if compatible.
Args:
name: Name of the hparam to be cast.
param_type: The type of the hparam.
value: The value to be cast, if compatible.
Returns:
The result of casting `value` to `param_type`.
Raises:
ValueError: If the type of `value` is not compatible with param_type.
* If `param_type` is a string type, but `value` is not.
* If `param_type` is a boolean, but `value` is not, or vice versa.
* If `param_type` is an integer type, but `value` is not.
* If `param_type` is a float type, but `value` is not a numeric type.
"""
fail_msg = (
"Could not cast hparam '%s' of type '%s' from value %r" %
(name, param_type, value))
# If `value` is already of type `param_type`, return it directly.
# `isinstance` is too weak (e.g. isinstance(True, int) == True).
if type(value) == param_type: # pylint: disable=unidiomatic-typecheck
return value
# Some callers use None, for which we can't do any casting/checking. :(
if issubclass(param_type, type(None)):
return value
# Avoid converting a non-string type to a string.
if (issubclass(param_type, (six.string_types, six.binary_type)) and
not isinstance(value, (six.string_types, six.binary_type))):
raise ValueError(fail_msg)
# Avoid converting a number or string type to a boolean or vice versa.
if issubclass(param_type, bool) != isinstance(value, bool):
raise ValueError(fail_msg)
# Avoid converting float to an integer (the reverse is fine).
if (issubclass(param_type, numbers.Integral) and
not isinstance(value, numbers.Integral)):
raise ValueError(fail_msg)
# Avoid converting a non-numeric type to a numeric type.
if (issubclass(param_type, numbers.Number) and
not isinstance(value, numbers.Number)):
raise ValueError(fail_msg)
return param_type(value)
def parse_values(values, type_map, ignore_unknown=False):
    """Parses hyperparameter values from a string into a python map.

    `values` is a string containing comma-separated `name=value` pairs.
    For each pair, the value of the hyperparameter named `name` is set to
    `value`.

    If a hyperparameter name appears multiple times in `values`, a ValueError
    is raised (e.g. 'a=1,a=2', 'a[1]=1,a[1]=2').

    If a hyperparameter name appears in both an index assignment and a scalar
    assignment, a ValueError is raised. (e.g. 'a=[1,2,3],a[0] = 1').

    The hyperparameter name may contain '.' symbols, which will result in an
    attribute name that is only accessible through the getattr and setattr
    functions. (And must be first explicitly added through add_hparam.)

    WARNING: Use of '.' in your variable names is allowed, but is not well
    supported and not recommended.

    The `value` in `name=value` must follow the syntax according to the
    type of the parameter:

    * Scalar integer: A Python-parsable integer point value. E.g.: 1,
      100, -12.
    * Scalar float: A Python-parsable floating point value. E.g.: 1.0,
      -.54e89.
    * Boolean: Either true or false.
    * Scalar string: A non-empty sequence of characters, excluding comma,
      spaces, and square brackets. E.g.: foo, bar_1.
    * List: A comma separated list of scalar values of the parameter type
      enclosed in square brackets. E.g.: [1,2,3], [1.0,1e-12], [high,low].

    When index assignment is used, the corresponding type_map key should be
    the list name. E.g. for "arr[1]=0" the type_map must have the key "arr"
    (not "arr[1]").

    Args:
      values: String. Comma separated list of `name=value` pairs where
        'value' must follow the syntax described above.
      type_map: A dictionary mapping hyperparameter names to types. Note every
        parameter name in values must be a key in type_map. The values must
        conform to the types indicated, where a value V is said to conform to
        a type T if either V has type T, or V is a list of elements of type T.
        Hence, for a multidimensional parameter 'x' taking float values,
        'x=[0.1,0.2]' will parse successfully if type_map['x'] = float.
      ignore_unknown: Bool. Whether values that are missing a type in type_map
        should be ignored. If set to True, a ValueError will not be raised for
        unknown hyperparameter type.

    Returns:
      A python map mapping each name to either:
      * A scalar value.
      * A list of scalar values.
      * A dictionary mapping index numbers to scalar values.
      (e.g. "x=5,L=[1,2],arr[1]=3" results in {'x':5,'L':[1,2],'arr':{1:3}})

    Raises:
      ValueError: If there is a problem with input.
      * If `values` cannot be parsed.
      * If a list is assigned to a list index (e.g. 'a[1] = [1,2,3]').
      * If the same rvalue is assigned two different values (e.g. 'a=1,a=2',
        'a[1]=1,a[1]=2', or 'a=1,a=[1]')
    """
    results_dictionary = {}
    pos = 0
    while pos < len(values):
        m = PARAM_RE.match(values, pos)
        if not m:
            raise ValueError('Malformed hyperparameter value: %s' % values[pos:])
        # Check that there is a comma between parameters and move past it.
        # (The trailing `($|,\s*)` group of PARAM_RE consumes the comma.)
        pos = m.end()
        # Parse the values.
        m_dict = m.groupdict()
        name = m_dict['name']
        if name not in type_map:
            if ignore_unknown:
                continue
            raise ValueError('Unknown hyperparameter type for %s' % name)
        type_ = type_map[name]

        # Set up correct parsing function (depending on whether type_ is a bool)
        if type_ == bool:

            def parse_bool(value):
                # Accept 'true'/'false' (either case) or anything int() accepts.
                # `name`, `type_` and `values` are intentionally captured from
                # the enclosing iteration for the error message.
                if value in ['true', 'True']:
                    return True
                elif value in ['false', 'False']:
                    return False
                else:
                    try:
                        return bool(int(value))
                    except ValueError:
                        _parse_fail(name, type_, value, values)

            parse = parse_bool
        else:
            parse = type_

        # If a singe value is provided
        if m_dict['val'] is not None:
            _process_scalar_value(name, parse, type_, m_dict, values,
                                  results_dictionary)

        # If the assigned value is a list:
        elif m_dict['vals'] is not None:
            _process_list_value(name, parse, type_, m_dict, values,
                                results_dictionary)

        else:  # Not assigned a list or value
            _parse_fail(name, type_, '', values)

    return results_dictionary
class HParams(object):
"""Class to hold a set of hyperparameters as name-value pairs.
A `HParams` object holds hyperparameters used to build and train a model,
such as the number of hidden units in a neural net layer or the learning rate
to use when training.
You first create a `HParams` object by specifying the names and values of the
hyperparameters.
To make them easily accessible the parameter names are added as direct
attributes of the class. A typical usage is as follows:
```python
# Create a HParams object specifying names and values of the model
# hyperparameters:
hparams = HParams(learning_rate=0.1, num_hidden_units=100)
# The hyperparameter are available as attributes of the HParams object:
hparams.learning_rate ==> 0.1
hparams.num_hidden_units ==> 100
```
Hyperparameters have type, which is inferred from the type of their value
passed at construction time. The currently supported types are: integer,
float, boolean, string, and list of integer, float, boolean, or string.
You can override hyperparameter values by calling the
[`parse()`](#HParams.parse) method, passing a string of comma separated
`name=value` pairs. This is intended to make it possible to override
any hyperparameter values from a single command-line flag to which
the user passes 'hyper-param=value' pairs. It avoids having to define
one flag for each hyperparameter.
The syntax expected for each value depends on the type of the parameter.
See `parse()` for a description of the syntax.
Example:
```python
# Define a command line flag to pass name=value pairs.
# For example using argparse:
import argparse
parser = argparse.ArgumentParser(description='Train my model.')
parser.add_argument('--hparams', type=str,
help='Comma separated list of "name=value" pairs.')
args = parser.parse_args()
...
def my_program():
# Create a HParams object specifying the names and values of the
# model hyperparameters:
hparams = tf.contrib.training.HParams(
learning_rate=0.1,
num_hidden_units=100,
activations=['relu', 'tanh'])
# Override hyperparameters values by parsing the command line
hparams.parse(args.hparams)
# If the user passed `--hparams=learning_rate=0.3` on the command line
# then 'hparams' has the following attributes:
hparams.learning_rate ==> 0.3
hparams.num_hidden_units ==> 100
hparams.activations ==> ['relu', 'tanh']
# If the hyperparameters are in json format use parse_json:
hparams.parse_json('{"learning_rate": 0.3, "activations": "relu"}')
```
"""
_HAS_DYNAMIC_ATTRIBUTES = True # Required for pytype checks.
def __init__(self, hparam_def=None, model_structure=None, **kwargs):
    """Create an instance of `HParams` from keyword arguments.

    The keyword arguments specify name-values pairs for the hyperparameters.
    The parameter types are inferred from the type of the values passed.

    The parameter names are added as attributes of `HParams` object, so they
    can be accessed directly with the dot notation `hparams._name_`.

    Example:

    ```python
    # Define 3 hyperparameters: 'learning_rate' is a float parameter,
    # 'num_hidden_units' an integer parameter, and 'activation' a string
    # parameter.
    hparams = tf.contrib.training.HParams(
        learning_rate=0.1, num_hidden_units=100, activation='relu')

    hparams.activation ==> 'relu'
    ```

    Note that a few names are reserved and cannot be used as hyperparameter
    names.  If you use one of the reserved name the constructor raises a
    `ValueError`.

    Args:
      hparam_def: Serialized hyperparameters, encoded as a hparam_pb2.HParamDef
        protocol buffer. If provided, this object is initialized by
        deserializing hparam_def.  Otherwise **kwargs is used.
      model_structure: An instance of ModelStructure, defining the feature
        crosses to be used in the Trial.
      **kwargs: Key-value pairs where the key is the hyperparameter name and
        the value is the value for the parameter.

    Raises:
      ValueError: If both `hparam_def` and initialization values are provided,
        or if one of the arguments is invalid.
    """
    # Register the hyperparameters and their type in _hparam_types.
    # This simplifies the implementation of parse().
    # _hparam_types maps the parameter name to a tuple (type, bool).
    # The type value is the type of the parameter for scalar hyperparameters,
    # or the type of the list elements for multidimensional hyperparameters.
    # The bool value is True if the value is a list, False otherwise.
    self._hparam_types = {}
    self._model_structure = model_structure
    if hparam_def:
        # Proto-based initialization; passing kwargs as well is an error.
        self._init_from_proto(hparam_def)
        if kwargs:
            raise ValueError('hparam_def and initialization values are '
                             'mutually exclusive')
    else:
        # Each kwarg becomes an attribute plus a registered type.
        for name, value in six.iteritems(kwargs):
            self.add_hparam(name, value)
def _init_from_proto(self, hparam_def):
    """Creates a new HParams from `HParamDef` protocol buffer.

    Args:
      hparam_def: `HParamDef` protocol buffer.
    """
    assert isinstance(hparam_def, hparam_pb2.HParamDef)
    for name, value in hparam_def.hparam.items():
        # `kind` is the name of the oneof field that is set, e.g.
        # 'int64_value' (scalar) or 'int64_list' (list).
        kind = value.WhichOneof('kind')
        if kind.endswith('_value'):
            # Single value.
            if kind.startswith('int64'):
                # Setting attribute value to be 'int' to ensure the type is
                # compatible with both Python2 and Python3.
                self.add_hparam(name, int(getattr(value, kind)))
            elif kind.startswith('bytes'):
                # Setting attribute value to be 'str' to ensure the type is
                # compatible with both Python2 and Python3. UTF-8 encoding is
                # assumed.
                self.add_hparam(name, compat.as_str(getattr(value, kind)))
            else:
                self.add_hparam(name, getattr(value, kind))
        else:
            # List of values.
            if kind.startswith('int64'):
                # Setting attribute value to be 'int' to ensure the type is
                # compatible with both Python2 and Python3.
                self.add_hparam(name, [int(v) for v in getattr(value, kind).value])
            elif kind.startswith('bytes'):
                # Setting attribute value to be 'str' to ensure the type is
                # compatible with both Python2 and Python3. UTF-8 encoding is
                # assumed.
                self.add_hparam(
                    name, [compat.as_str(v) for v in getattr(value, kind).value])
            else:
                self.add_hparam(name, [v for v in getattr(value, kind).value])
def add_hparam(self, name, value):
    """Adds {name, value} pair to hyperparameters.

    Args:
      name: Name of the hyperparameter.
      value: Value of the hyperparameter. Can be one of the following types:
        int, float, string, int list, float list, or string list.

    Raises:
      ValueError: if one of the arguments is invalid.
    """
    # Keys in kwargs are unique, but 'name' could be the name of a
    # pre-existing attribute of this object.  In that case we refuse to use
    # it as a hyperparameter name.
    if getattr(self, name, None) is not None:
        raise ValueError('Hyperparameter name is reserved: %s' % name)
    if isinstance(value, (list, tuple)):
        if not value:
            raise ValueError(
                'Multi-valued hyperparameters cannot be empty: %s' % name)
        # Element type is inferred from the first entry.
        element_type, is_list = type(value[0]), True
    else:
        element_type, is_list = type(value), False
    self._hparam_types[name] = (element_type, is_list)
    setattr(self, name, value)
def set_hparam(self, name, value):
    """Set the value of an existing hyperparameter.

    This function verifies that the type of the value matches the type of the
    existing hyperparameter.

    Args:
      name: Name of the hyperparameter.
      value: New value of the hyperparameter.

    Raises:
      KeyError: If the hyperparameter doesn't exist.
      ValueError: If there is a type mismatch.
    """
    param_type, is_list = self._hparam_types[name]
    if isinstance(value, list):
        if not is_list:
            raise ValueError(
                'Must not pass a list for single-valued parameter: %s' % name)
        cast_value = [_cast_to_type_if_compatible(name, param_type, v)
                      for v in value]
    else:
        if is_list:
            raise ValueError(
                'Must pass a list for multi-valued parameter: %s.' % name)
        cast_value = _cast_to_type_if_compatible(name, param_type, value)
    setattr(self, name, cast_value)
def del_hparam(self, name):
    """Removes the hyperparameter with key 'name'.

    Does nothing if it isn't present.

    Args:
      name: Name of the hyperparameter.
    """
    if not hasattr(self, name):
        return
    delattr(self, name)
    del self._hparam_types[name]
def parse(self, values):
    """Override existing hyperparameter values, parsing new values from a string.

    See parse_values for more detail on the allowed format for values.

    Args:
      values: String. Comma separated list of `name=value` pairs where 'value'
        must follow the syntax described above.

    Returns:
      The `HParams` instance.

    Raises:
      ValueError: If `values` cannot be parsed or a hyperparameter in `values`
        doesn't exist.
    """
    # parse_values only needs the element type, not the is_list flag.
    type_map = {name: param_type
                for name, (param_type, _) in self._hparam_types.items()}
    return self.override_from_dict(parse_values(values, type_map))
def override_from_dict(self, values_dict):
    """Override existing hyperparameter values from a dictionary.

    Args:
      values_dict: Dictionary of name:value pairs.

    Returns:
      The `HParams` instance.

    Raises:
      KeyError: If a hyperparameter in `values_dict` doesn't exist.
      ValueError: If `values_dict` cannot be parsed.
    """
    for name in values_dict:
        self.set_hparam(name, values_dict[name])
    return self
@deprecation.deprecated(None, 'Use `override_from_dict`.')
# Thin deprecated alias kept for backward compatibility.
def set_from_map(self, values_map):
    """DEPRECATED. Use override_from_dict."""
    return self.override_from_dict(values_dict=values_map)
def set_model_structure(self, model_structure):
    """Stores the ModelStructure describing feature crosses for the Trial."""
    self._model_structure = model_structure
def get_model_structure(self):
    """Returns the stored ModelStructure (or None if never set)."""
    return self._model_structure
def to_json(self, indent=None, separators=None, sort_keys=False):
    """Serializes the hyperparameters into JSON.

    Args:
      indent: If a non-negative integer, JSON array elements and object members
        will be pretty-printed with that indent level. An indent level of 0, or
        negative, will only insert newlines. `None` (the default) selects the
        most compact representation.
      separators: Optional `(item_separator, key_separator)` tuple. Default is
        `(', ', ': ')`.
      sort_keys: If `True`, the output dictionaries will be sorted by key.

    Returns:
      A JSON string.
    """
    snapshot = self.values()
    return json.dumps(snapshot, indent=indent, separators=separators,
                      sort_keys=sort_keys)
def parse_json(self, values_json):
    """Override existing hyperparameter values from a JSON object string.

    Args:
      values_json: String containing a json object of name:value pairs.

    Returns:
      The `HParams` instance.

    Raises:
      KeyError: If a hyperparameter in `values_json` doesn't exist.
      ValueError: If `values_json` cannot be parsed.
    """
    return self.override_from_dict(json.loads(values_json))
def values(self):
    """Return the hyperparameter values as a Python dictionary.

    Returns:
      A dictionary with hyperparameter names as keys.  The values are the
      hyperparameter values.
    """
    return {name: getattr(self, name) for name in self._hparam_types}
def get(self, key, default=None):
    """Returns the value of `key` if it exists, else `default`.

    When a non-None `default` is supplied, it is validated against the
    registered type of the hyperparameter before the stored value is
    returned, so incompatible defaults fail loudly.
    """
    if key not in self._hparam_types:
        return default
    if default is not None:
        param_type, is_param_list = self._hparam_types[key]
        type_str = 'list<%s>' % param_type if is_param_list else str(param_type)
        fail_msg = ("Hparam '%s' of type '%s' is incompatible with "
                    'default=%s' % (key, type_str, default))

        is_default_list = isinstance(default, list)
        if is_param_list != is_default_list:
            raise ValueError(fail_msg)

        try:
            candidates = default if is_default_list else [default]
            for candidate in candidates:
                _cast_to_type_if_compatible(key, param_type, candidate)
        except ValueError as e:
            raise ValueError('%s. %s' % (fail_msg, e))

    return getattr(self, key)
def __contains__(self, key):
  """Supports `key in hparams` membership tests."""
  return key in self._hparam_types

def __str__(self):
  """Renders the hyperparameters as comma-joined 'name=value' pairs."""
  pairs = ('{}={}'.format(name, value)
           for name, value in self.values().items())
  return ','.join(pairs)

def __repr__(self):
  """Returns a deterministic (key-sorted) debug representation."""
  ordered = str(sorted(self.values().items()))
  return '%s(%s)' % (type(self).__name__, ordered)
@staticmethod
def _get_kind_name(param_type, is_list):
  """Returns the HParamDef field name for a (param_type, is_list) pair.

  Args:
    param_type: Data type of the hparam.
    is_list: Whether this is a list.

  Returns:
    A string such as 'int64_value' or 'float_list'.

  Raises:
    ValueError: If parameter type is not recognized.
  """
  # Ordered checks: bool must precede the integer check because Python
  # treats bool as a subclass of int. 'int'/'long' map to 'int64' and
  # str/bytes map to 'bytes' so the name is Python-2/3 agnostic.
  ordered_checks = (
      (bool, 'bool'),
      (six.integer_types, 'int64'),
      ((six.string_types, six.binary_type), 'bytes'),
      (float, 'float'),
  )
  for types, typename in ordered_checks:
    if issubclass(param_type, types):
      suffix = 'list' if is_list else 'value'
      return '_'.join([typename, suffix])
  raise ValueError('Unsupported parameter type: %s' % str(param_type))
def to_proto(self, export_scope=None):  # pylint: disable=unused-argument
  """Converts a `HParams` object to a `HParamDef` protocol buffer.

  Args:
    export_scope: Optional `string`. Name scope to remove.

  Returns:
    A `HParamDef` protocol buffer.
  """
  hparam_proto = hparam_pb2.HParamDef()
  for name in self._hparam_types:
    # Resolve the proto field ('<type>_value' / '<type>_list') for this hparam.
    param_type, is_list = self._hparam_types.get(name, (None, None))
    kind = HParams._get_kind_name(param_type, is_list)
    value = getattr(self, name)
    if is_list:
      # Strings are normalized to bytes before being stored in the proto.
      if kind.startswith('bytes'):
        elements = [compat.as_bytes(v) for v in value]
      else:
        elements = list(value)
      getattr(hparam_proto.hparam[name], kind).value.extend(elements)
    else:
      if kind.startswith('bytes'):
        value = compat.as_bytes(value)
      setattr(hparam_proto.hparam[name], kind, value)
  return hparam_proto
@staticmethod
def from_proto(hparam_def, import_scope=None):  # pylint: disable=unused-argument
  """Creates a new `HParams` instance from a `HParamDef` protocol buffer.

  Args:
    hparam_def: `HParamDef` protocol buffer passed to the constructor.
    import_scope: Unused; kept so the signature matches the proto-function
      registry below.

  Returns:
    A new `HParams` object built from `hparam_def`.
  """
  return HParams(hparam_def=hparam_def)
# Register the (to_proto, from_proto) pair under the 'hparams' key.
# NOTE(review): presumably this lets 'hparams' collection entries be
# serialized to / restored from graph protocol buffers -- confirm against
# the `ops.register_proto_function` contract.
ops.register_proto_function(
    'hparams',
    proto_type=hparam_pb2.HParamDef,
    to_proto=HParams.to_proto,
    from_proto=HParams.from_proto)
| |
# Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/Variable_Stiffness_Variable_Velocity/with_padding_1.2s/')
from data_padding_hshv_1dot2s import Fmat_original_hshv
from data_padding_hslv_1dot2s import Fmat_original_hslv
from data_padding_lshv_1dot2s import Fmat_original_lshv
from data_padding_lslv_1dot2s import Fmat_original_lslv
# Returns mu,sigma for 20 hidden-states from feature-vectors(123,35) for RF,SF,RM,SM models
def feature_to_mu_sigma(fvec):
    """Estimates per-state Gaussian emission parameters from feature vectors.

    The rows of `fvec` are split into 20 equal consecutive chunks (one per
    hidden state); the mean and standard deviation of each chunk -- taken
    over all of its rows and columns -- become that state's (mu, sigma).

    Args:
        fvec: 2-D array/matrix of features, shape (m, n), with m >= 20.

    Returns:
        (mu, sigma): two (20, 1) numpy matrices.
    """
    m, n = np.shape(fvec)
    mu = np.matrix(np.zeros((20, 1)))
    sigma = np.matrix(np.zeros((20, 1)))
    # `//` keeps the chunk height an int under Python 3 as well; the
    # original `m/20` yields a float there and breaks the slicing below.
    DIVS = m // 20
    for index in range(20):
        m_init = index * DIVS
        chunk = fvec[m_init:(m_init + DIVS), 0:]
        # np.mean/np.std replace the deprecated scipy aliases scp.mean /
        # scp.std (removed from modern SciPy); numerically identical.
        mu[index] = np.mean(chunk)
        sigma[index] = np.std(chunk)
    return mu, sigma
# Returns sequence given raw data
def create_seq(fvec):
    """Downsamples every column of `fvec` to a 20-sample sequence.

    Each column is split into 20 equal consecutive row-chunks and every
    chunk is replaced by its mean, yielding one 20-step observation
    sequence per column (trial).

    Args:
        fvec: 2-D array/matrix of features, shape (m, n), with m >= 20.

    Returns:
        A (20, n) numpy matrix of downsampled sequences.
    """
    m, n = np.shape(fvec)
    seq = np.matrix(np.zeros((20, n)))
    # `//` keeps the chunk height an int under Python 3 as well; the
    # original `m/20` yields a float there and breaks the slicing below.
    DIVS = m // 20
    for i in range(n):
        for index in range(20):
            m_init = index * DIVS
            # np.mean replaces the deprecated scipy alias scp.mean.
            seq[index, i] = np.mean(fvec[m_init:(m_init + DIVS), i])
    return seq
if __name__ == '__main__':

    # HMM - Implementation.
    # The original script repeated ~100 nearly identical lines four times
    # (one copy per held-out condition); the copies are collapsed into one
    # leave-one-out loop that preserves the training-column ordering, the
    # RF->RM->SF (else SM) tie-breaking, and the confusion-matrix updates.
    F = ghmm.Float()  # emission domain of the models

    # A - Transition Matrix (20 states, left-to-right structure)
    A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
         [0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
         [0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
         [0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.09, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
         [0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.15, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.10, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.10, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.40, 0.20, 0.10, 0.04, 0.02, 0.02, 0.02],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.03, 0.02],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.05],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.20, 0.40, 0.20, 0.10, 0.10],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.20, 0.40, 0.20, 0.20],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.30, 0.50, 0.20],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.40, 0.60],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]

    # pi - initial probabilities per state
    pi = [0.05] * 20

    # Confusion matrix: rows = predicted class, columns = true class
    # (0 = Rigid-Fixed, 1 = Rigid-Movable, 2 = Soft-Fixed, 3 = Soft-Movable)
    cmat = np.zeros((4, 4))

    # The four recording conditions; each fold below holds one out as the
    # test set and trains on the remaining three, in this fixed order (the
    # same column-stacking order the original four copies used).
    datasets = [('hshv', Fmat_original_hshv),
                ('hslv', Fmat_original_hslv),
                ('lshv', Fmat_original_lshv),
                ('lslv', Fmat_original_lslv)]

    # Column ranges of the four classes inside every data matrix:
    # RF = 0:15, RM = 15:30, SF = 30:45, SM = 45:60
    class_slices = [(0, 15), (15, 30), (30, 45), (45, 60)]

    for test_index in range(len(datasets)):
        test_mat = datasets[test_index][1]
        train_mats = [mat for idx, (_, mat) in enumerate(datasets)
                      if idx != test_index]

        # Train one HMM per class (RF, RM, SF, SM) on the concatenated
        # training columns of that class.
        models = []
        for (lo, hi) in class_slices:
            train_cols = np.matrix(np.column_stack(
                tuple(mat[0:121, lo:hi] for mat in train_mats)))
            mu, sigma = feature_to_mu_sigma(train_cols)
            # B - Emission parameters: one (mu, sigma) pair per state.
            B = np.zeros((20, 2))
            B[:, 0] = np.array(mu).ravel()
            B[:, 1] = np.array(sigma).ravel()
            model = ghmm.HMMFromMatrices(F, ghmm.GaussianDistribution(F),
                                         A, B.tolist(), pi)  # Will be Trained
            train_seq = (np.array(train_cols).T).tolist()
            model.baumWelch(ghmm.SequenceSet(F, train_seq))
            models.append(model)

        # Classify every held-out trial by the model with the best
        # Viterbi log-likelihood.
        test_cols = test_mat[0:121, :]
        num_trials = np.size(test_cols, 1)
        predictions = np.zeros((4, num_trials))
        for k in range(num_trials):
            trial = (np.array(test_cols[0:121, k]).T).tolist()
            observation = np.array(sum(trial, []))
            emission = ghmm.EmissionSequence(F, observation.tolist())
            scores = [model.viterbi(emission)[1] for model in models]
            best = max(scores)
            # First class (RF, RM, SF order) matching the best score wins;
            # SM is the fallback -- mirrors the original if/elif/else chain.
            for ci in range(4):
                if ci == 3 or scores[ci] == best:
                    predictions[ci, k] = 1
                    break

        # Accumulate this fold into the confusion matrix.
        for i in range(4):
            for j, (lo, hi) in enumerate(class_slices):
                cmat[i][j] = cmat[i][j] + np.sum(predictions[i, lo:hi])

    # Plot Confusion Matrix
    Nlabels = 4
    fig = pp.figure()
    ax = fig.add_subplot(111)
    figplot = ax.matshow(cmat, interpolation='nearest', origin='upper',
                         extent=[0, Nlabels, 0, Nlabels])
    ax.set_title('Performance of HMM Models')
    pp.xlabel("Targets")
    pp.ylabel("Predictions")
    ax.set_xticks([0.5, 1.5, 2.5, 3.5])
    ax.set_xticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
    ax.set_yticks([3.5, 2.5, 1.5, 0.5])
    ax.set_yticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
    figbar = fig.colorbar(figplot)
    # Overlay the raw count on each cell of the matrix plot.
    for i in range(4):
        for j in range(4):
            pp.text(j + 0.5, 3.5 - i, cmat[i][j])
    pp.savefig('results_force_20_states.png')
    pp.show()
| |
#!/usr/bin/env python
# queue-cmd.py
# GusE 2013.12.16 V0.1
"""
Queue shell commands
"""
__version__ = "1.3"
import getopt
import sys
import os
import subprocess
import traceback
import logging
import logging.handlers
import tempfile
import argparse
from re import match
__app__ = os.path.basename(__file__)
__author__ = "Gus E"
__copyright__ = "Copyright 2013"
__credits__ = ["Gus E"]
__license__ = "GPL"
__maintainer__ = "Gus E"
__email__ = "gesquive@gmail"
__status__ = "Production"
script_www = 'https://github.com/gesquive/queue-cmd'
script_url = 'https://raw.github.com/gesquive/queue-cmd/master/queue-cmd.py'
#--------------------------------------
# Configurable Constants
LOG_FILE = '/var/log/' + os.path.splitext(__app__)[0] + '.log'  # default log path
LOG_SIZE = 1024*1024*200  # rotate the log file after ~200 MB (maxBytes)
DEFAULT_TAIL_LINES = 24  # lines shown by --print-output when no count is given
verbose = False  # overwritten from the --verbose flag in main()
debug = False  # overwritten from the --debug flag in main()
logger = logging.getLogger(__app__)  # module-wide logger
def main():
global verbose, debug
verbose = False
debug = False
parser = argparse.ArgumentParser(add_help=False,
description="Queue shell commands.",
epilog="%(__app__)s v%(__version__)s\n" % globals())
group = parser.add_argument_group("Options")
group.add_argument("-h", "--help", action="help",
help="Show this help message and exit.")
group.add_argument("-v", "--verbose", action="store_true", dest="verbose",
help="Writes all messages to console.")
group.add_argument("-d", "--debug", action="store_true", dest="debug",
help=argparse.SUPPRESS)
group.add_argument("-l", "--log-file", dest="log_file", default=LOG_FILE)
group.add_argument("-u", "--update", action="store_true", dest="update",
help="Checks server for an update, replaces the current version if "\
"there is a newer version available.")
group.add_argument("-V", "--version", action="version",
version="%(__app__)s v%(__version__)s" % globals())
group = parser.add_argument_group("Queue Options")
group.add_argument("command", nargs="?", help="The shell command to run. "
"Required when adding a command.")
group.add_argument("-n", "--queue-name", dest="queue_name",
type=str, default="default",
help="The unique queue name to perform this action on.")
group = parser.add_argument_group("Status Options")
group.add_argument("-s", "--print-status", action="store_true",
dest="print_status", help="Print the status of the current queue.")
group.add_argument("-q", "--print-queue", action="store_true",
dest="print_queue", help="Print the queue of commands.")
group.add_argument("-o", "--print-output", dest="print_output",
const=-1, type=int, nargs='?', metavar="NUM_LINES",
help="Print the last NUM_LINES of the current command.")
group.add_argument("-t", "--tail-output",
action="store_true", dest="tail_ouput",
help="Tail the output of the current command.")
args = parser.parse_args()
verbose = args.verbose
debug = args.debug
if args.update:
update(script_url)
sys.exit()
log_file = args.log_file
if not os.access(os.path.dirname(log_file), os.W_OK):
# Couldn't write to the given log file, try writing a temporary one instead
log_file = os.path.join(tempfile.gettempdir(),
os.path.splitext(__app__)[0] + '.log')
if not os.access(os.path.dirname(log_file), os.W_OK):
print "ERROR: Cannot write to '%(log_file)s'.\nExiting." % locals()
sys.exit(2)
file_handler = logging.handlers.RotatingFileHandler(log_file,
maxBytes=LOG_SIZE, backupCount=9)
file_formater = logging.Formatter('%(asctime)s,%(levelname)s,%(process)d,%(message)s')
file_handler.setFormatter(file_formater)
logger.addHandler(file_handler)
if verbose:
console_handler = logging.StreamHandler(sys.stdout)
console_formatter = logging.Formatter("[%(asctime)s] %(levelname)-5.5s: %(message)s")
console_handler.setFormatter(console_formatter)
logger.addHandler(console_handler)
if debug:
logger.setLevel(logging.DEBUG)
logger.debug("Debug mode activated.")
else:
logger.setLevel(logging.INFO)
try:
print_mode = args.print_status or args.tail_ouput \
or args.print_output or args.print_queue
if args.print_status or args.tail_ouput or args.print_output:
# Then we will be entering status mode
# Always read the first line, get the pid file, and check to see
# if we are still running
output_file = get_output_file(args.queue_name, "r")
status_line = ""
if output_file:
status_line = output_file.readline()
info = match(r'(\d*),([\d\.]*),(\d*),(.*)$', status_line)
if not info:
print "The command output is missing/corrupted."
sys.exit(1)
(m_pid, ctime, c_pid, cmd) = info.groups()
if not pid_exists(m_pid):
m_pid = None
if not pid_exists(c_pid):
c_pid = None
if args.tail_ouput:
#TODO: We need a way of detecting when a new file has replaced
# this file, and then switch to tailing that file
lines = follow(output_file)
for line in lines:
sys.stdout.write(line)
elif args.print_status:
from datetime import datetime
if m_pid:
print "Queue is running"
print "queue PID: %s" % m_pid
else:
print "Queue is not running"
if c_pid:
print "Command is running"
print "command PID: %s" % c_pid
else:
print "Command is not running"
print "start: %s" \
% datetime.fromtimestamp(float(ctime)).strftime('%Y-%m-%d %H:%M:%S')
print "command: '%s'" % cmd
elif args.print_output:
if args.print_output == -1:
args.print_output = DEFAULT_TAIL_LINES
if not m_pid:
print "The queue is not running. Printing last output\n"
output = tail(output_file, lines=args.print_output+1)
# Remember to skip the initial info line
for line in output[1:]:
sys.stdout.write(line)
output_file.close()
elif args.print_queue:
queue_file = get_queue_file(args.queue_name)
if args.queue_name == "default":
sys.stdout.write("Command Queue")
else:
sys.stdout.write("Command Queue \'%s\'" % args.queue_name)
line_no = 1
for line in queue_file:
sys.stdout.write("\n%02d: %s" % (line_no, line.rstrip()))
line_no += 1
queue_file.close()
if line_no == 1:
print "\rQueue is empty!".ljust(len(args.queue_name)+4)
print ""
elif not args.command and not args.update:
print "You did not specify a command to run."
sys.exit(0)
# First check to see if we are the master or slave
is_master = get_lock_file()
# Check if master is running, if it is not, the start it up
if is_master:
logger.info("Run in mode: master")
daemonize()
else:
logger.info("Run in mode: slave")
if args.command:
push_command(args.command)
shell_runners = []
complete = 0
while is_master:
# Then we are the main thread, start running the threads
cmd = get_next_command(args.queue_name)
if not cmd: # Then there are no commands left to run, finish up
logger.info("All shell commands completed.")
break
runner = ShellRunner(cmd, name="default")
shell_runners.append(runner)
runner.start()
# Force threads to just one for now
threads = 1
# Always add on one thread to account for the Main Thread
while (threading.activeCount() >= (threads + 1)):
sleep(0.25)
# One of the threads finished, cleanup
complete += 1
for runner in shell_runners:
if not runner.isAlive():
logger.info("Shell command completed: '%s'" % runner.cmd)
runner.handled = True
shell_runners = [r for r in shell_runners if not r.handled]
except (KeyboardInterrupt, SystemExit):
pass
except Exception, e:
print traceback.format_exc()
close_lock_file()
import fcntl
import os
pid_file = None
def get_lock_file():
    """Try to become the master process by locking the app's pid file.

    Opens (or creates) a lock file in the temp directory and attempts a
    non-blocking exclusive flock on it.  On success our PID is written
    into the file and the handle is kept open in the module-global
    ``pid_file`` so the flock is held for the life of the process.

    Returns:
        True if the lock was acquired (run as master), False if another
        process already holds it (run as slave).
    """
    global pid_file
    # Generate path
    pid_file_path = os.path.join(tempfile.gettempdir(), \
                os.path.splitext(__app__)[0])
    logger.debug("Getting lock file '%(pid_file_path)s'" % locals())
    # If the file exists, read it
    pid_file = open(pid_file_path, 'a+')
    try:
        # LOCK_NB: fail immediately instead of blocking if another
        # process owns the exclusive lock.
        fcntl.flock(pid_file.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        # Another running process has this lock
        pid_file.close()
        pid_file = None
        return False
    # If the pid does not exist, replace pid with our own, run as master
    pid_file.seek(0)
    pid_file.truncate()
    pid_file.write(str(os.getpid()))
    pid_file.flush()
    return True
def close_lock_file():
    """Release the master lock file, if this process is holding it."""
    global pid_file
    if not pid_file:
        return
    pid_file.close()
    pid_file = None
def push_command(cmd, name="default"):
    """Append *cmd* as a new line at the end of the named queue file."""
    logger.debug("Pushing command %(name)s:>'%(cmd)s'" % locals())
    queue_file = get_queue_file(name)
    queue_file.write(cmd + "\n")
    queue_file.flush()
    queue_file.close()
def get_next_command(name="default"):
    """Pop and return the first command from the named queue file.

    The remaining entries are written back, so the queue shrinks by
    one line.  Returns None when the queue is empty.
    """
    logger.debug("Getting command for %(name)s" % locals())
    queue_file = get_queue_file(name)
    queue_file.seek(0)
    commands = queue_file.readlines()
    cmd = None
    if commands:
        # Rewrite the file with the first entry removed.
        queue_file.seek(0)
        queue_file.truncate()
        cmd = commands[0].strip()
        queue_file.writelines(commands[1:])
    queue_file.close()
    logger.debug("Got next command %(name)s:>'%(cmd)s'" % locals())
    return cmd
import fcntl
from time import sleep
def get_queue_file(name="default"):
    """Open the named queue file and acquire an exclusive flock on it.

    Polls every half second until the lock is obtained.  The caller is
    responsible for closing the returned file, which releases the lock.
    """
    queue_file_path = os.path.join(tempfile.gettempdir(), \
                os.path.splitext(__app__)[0] + "-" + name + ".q")
    logger.debug("Acquiring queue file lock for '%(queue_file_path)s'" % locals())
    while True:
        queue_file = open(queue_file_path, "a+")
        try:
            fcntl.flock(queue_file.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
            # Moved before the return: previously this debug line sat
            # after the loop and was unreachable.
            logger.debug("Queue lock file acquired")
            return queue_file
        except IOError:
            # Another running process has this lock; close our handle
            # before retrying (bug fix: one file descriptor used to be
            # leaked per retry iteration) and wait for it to be given up.
            queue_file.close()
            sleep(.5)
def get_output_file(name="default", permissions="a+"):
    """Open the output file for the named queue.

    Returns the open file object, or None when the file cannot be
    opened with the requested mode (e.g. mode "r" on a missing file).
    """
    output_file_path = os.path.join(tempfile.gettempdir(), \
                os.path.splitext(__app__)[0] + '-' + name + ".out")
    logger.debug("Acquiring output file '%s'", output_file_path)
    output_file = None
    try:
        output_file = open(output_file_path, permissions)
    except (IOError, OSError):
        # Narrowed from a bare except: only swallow file-open errors so
        # programming errors are no longer silently hidden.
        logger.debug("Could not open output file '%s'", output_file_path)
    return output_file
def pid_exists(pid):
    """Return True when a process with *pid* appears in /proc (Linux)."""
    return os.path.exists('/proc/' + str(pid))
def tail(f, lines=1, _buffer=4098):
    """Return the last *lines* lines of the already-open file *f*.

    Seeks backwards from the end of the file in steps of *_buffer*
    bytes until enough lines have been read, then returns the final
    slice.  Files too small for an end-relative seek are read whole.
    """
    found = []
    blocks_back = 1
    while len(found) < lines:
        try:
            # Jump blocks_back * _buffer bytes before end-of-file.
            f.seek(-(blocks_back * _buffer), 2)
        except IOError:
            # File shorter than the window (or the stream refuses
            # end-relative seeks): fall back to reading everything.
            f.seek(0)
            found = f.readlines()
            break
        found = f.readlines()
        if len(found) > lines:
            # Enough lines collected; stop scanning backwards.
            break
        blocks_back += 1
    return found[-lines:]
import time
def follow(f_file):
    """Generator yielding lines appended to *f_file*, like ``tail -f``.

    Starts at end-of-file so only newly written data is reported, and
    backs off gradually (up to ~1 second) while the file is idle.
    """
    f_file.seek(0, 2)  # jump to EOF; existing content is skipped
    delay = 0.00001
    while True:
        line = f_file.readline()
        if line:
            delay = 0.00001  # data flowing again: reset the backoff
            yield line
        else:
            time.sleep(delay)
            if delay < 1.0:
                delay += 0.00001
import threading
import subprocess
class ShellRunner(threading.Thread):
    """Worker thread that executes one shell command.

    run() launches the command via subprocess, writes an info line
    "<runner_pid>,<start_time>,<command_pid>,<command>" to the queue's
    output file, then streams the command's combined stdout/stderr into
    that file until the process exits.
    """
    # Class-level defaults; __init__ overwrites most of them.
    cmd = None            # command string to execute
    cwd = None            # working directory handed to Popen
    seperate = True       # passed as shell= to Popen (sic: "separate")
    stdout = subprocess.PIPE       # NOTE(review): unused; run() hardcodes PIPE
    print_to_stdout = False        # NOTE(review): unused in this class
    cmd_output = []       # NOTE(review): class-level mutable, would be shared
                          # across instances if ever appended to
    def __init__(self, cmd, cwd=None, seperate=True, name="default"):
        threading.Thread.__init__(self)
        self.cmd = cmd
        self.cwd = cwd
        self.seperate = seperate
        self.queue = name
    def run(self):
        from time import time
        from os import getpid
        # 'w+' truncates: each run replaces the previous command's output.
        output_file = get_output_file(self.queue, 'w+')
        proc = subprocess.Popen(self.cmd, shell=self.seperate,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=self.cwd)
        # Machine-readable status line consumed by the CLI status/tail modes.
        output_file.write("%d,%s,%d,%s\n" % (getpid(), time(),\
            proc.pid , self.cmd))
        output_file.flush()
        logger.info("Daemon running '%s' as pid=%d", self.cmd, proc.pid)
        # Stream output line by line until EOF ('' sentinel on pipe close).
        for line in iter(proc.stdout.readline, ""):
            output_file.write(line)
        # print line,
        proc.wait()
        output_file.close()
        logger.info("Daemon run complete with code %d.", proc.returncode)
import sys
import os
def daemonize (stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
    '''This forks the current process into a daemon.
    The stdin, stdout, and stderr arguments are file names that
    will be opened and be used to replace the standard file descriptors
    in sys.stdin, sys.stdout, and sys.stderr.
    These arguments are optional and default to /dev/null.
    Note that stderr is opened unbuffered, so
    if it shares a file with stdout then interleaved output
    may not appear in the order that you expect.

    Uses the classic Unix double-fork so the daemon is re-parented to
    init and can never reacquire a controlling terminal.
    '''
    # Do first fork.
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)   # Exit first parent.
    except OSError, e:
        sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
        sys.exit(1)
    # Decouple from parent environment.
    # chdir("/") avoids pinning a mount point; setsid() makes us a
    # session leader detached from the controlling terminal.
    os.chdir("/")
    os.umask(0)
    os.setsid()
    # Do second fork.
    # A session leader could reacquire a tty; the grandchild cannot.
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)   # Exit second parent.
    except OSError, e:
        sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
        sys.exit(1)
    # Now I am a daemon!
    # Redirect standard file descriptors.
    # file() is the Python 2 builtin; buffering=0 keeps stderr unbuffered.
    si = file(stdin, 'r')
    so = file(stdout, 'a+')
    se = file(stderr, 'a+', 0)
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
def update(dl_url, force_update=False):
"""
Attempts to download the update url in order to find if an update is needed.
If an update is needed, the current script is backed up and the update is
saved in its place.
"""
import urllib
import re
from subprocess import call
def compare_versions(vA, vB):
"""
Compares two version number strings
@param vA: first version string to compare
@param vB: second version string to compare
@author <a href="http_stream://sebthom.de/136-comparing-version-numbers-in-jython-pytho/">Sebastian Thomschke</a>
@return negative if vA < vB, zero if vA == vB, positive if vA > vB.
"""
if vA == vB: return 0
def num(s):
if s.isdigit(): return int(s)
return s
seqA = map(num, re.findall('\d+|\w+', vA.replace('-SNAPSHOT', '')))
seqB = map(num, re.findall('\d+|\w+', vB.replace('-SNAPSHOT', '')))
# this is to ensure that 1.0 == 1.0.0 in cmp(..)
lenA, lenB = len(seqA), len(seqB)
for i in range(lenA, lenB): seqA += (0,)
for i in range(lenB, lenA): seqB += (0,)
rc = cmp(seqA, seqB)
if rc == 0:
if vA.endswith('-SNAPSHOT'): return -1
if vB.endswith('-SNAPSHOT'): return 1
return rc
# dl the first 256 bytes and parse it for version number
try:
http_stream = urllib.urlopen(dl_url)
update_file = http_stream.read(256)
http_stream.close()
except IOError, (errno, strerror):
print "Unable to retrieve version data"
print "Error %s: %s" % (errno, strerror)
return
match_regex = re.search(r'__version__ *= *"(\S+)"', update_file)
if not match_regex:
print "No version info could be found"
return
update_version = match_regex.group(1)
if not update_version:
print "Unable to parse version data"
return
if force_update:
print "Forcing update, downloading version %s..." \
% update_version
else:
cmp_result = compare_versions(__version__, update_version)
if cmp_result < 0:
print "Newer version %s available, downloading..." % update_version
elif cmp_result > 0:
print "Local version %s newer then available %s, not updating." \
% (__version__, update_version)
return
else:
print "You already have the latest version."
return
# dl, backup, and save the updated script
app_path = os.path.realpath(sys.argv[0])
if not os.access(app_path, os.W_OK):
print "Cannot update -- unable to write to %s" % app_path
dl_path = app_path + ".new"
backup_path = app_path + ".old"
try:
dl_file = open(dl_path, 'w')
http_stream = urllib.urlopen(dl_url)
total_size = None
bytes_so_far = 0
chunk_size = 8192
try:
total_size = int(http_stream.info().getheader('Content-Length').strip())
except:
# The header is improper or missing Content-Length, just download
dl_file.write(http_stream.read())
while total_size:
chunk = http_stream.read(chunk_size)
dl_file.write(chunk)
bytes_so_far += len(chunk)
if not chunk:
break
percent = float(bytes_so_far) / total_size
percent = round(percent*100, 2)
sys.stdout.write("Downloaded %d of %d bytes (%0.2f%%)\r" %
(bytes_so_far, total_size, percent))
if bytes_so_far >= total_size:
sys.stdout.write('\n')
http_stream.close()
dl_file.close()
except IOError, (errno, strerror):
print "Download failed"
print "Error %s: %s" % (errno, strerror)
return
try:
os.rename(app_path, backup_path)
except OSError, (errno, strerror):
print "Unable to rename %s to %s: (%d) %s" \
% (app_path, backup_path, errno, strerror)
return
try:
os.rename(dl_path, app_path)
except OSError, (errno, strerror):
print "Unable to rename %s to %s: (%d) %s" \
% (dl_path, app_path, errno, strerror)
return
try:
import shutil
shutil.copymode(backup_path, app_path)
except:
os.chmod(app_path, 0755)
print "New version installed as %s" % app_path
print "(previous version backed up to %s)" % (backup_path)
return
# Standard script entry guard.
if __name__ == '__main__':
    main()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class VirtualMachineExtensionImagesOperations(object):
    """VirtualMachineExtensionImagesOperations operations.

    AutoRest-generated client for the Microsoft.Compute VM extension
    image endpoints (get / list types / list versions).

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client Api Version. Constant value: "2016-04-30-preview".
    """

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Pinned service API version sent with every request.
        self.api_version = "2016-04-30-preview"
        self.config = config

    def get(
            self, location, publisher_name, type, version, custom_headers=None, raw=False, **operation_config):
        """Gets a virtual machine extension image.

        :param location:
        :type location: str
        :param publisher_name:
        :type publisher_name: str
        :param type:
        :type type: str
        :param version:
        :type version: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`VirtualMachineExtensionImage
         <azure.mgmt.compute.models.VirtualMachineExtensionImage>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions/{version}'
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
            'type': self._serialize.url("type", type, 'str'),
            'version': self._serialize.url("version", version, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Correlation id for request tracing on the service side.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualMachineExtensionImage', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def list_types(
            self, location, publisher_name, custom_headers=None, raw=False, **operation_config):
        """Gets a list of virtual machine extension image types.

        :param location:
        :type location: str
        :param publisher_name:
        :type publisher_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: list of :class:`VirtualMachineExtensionImage
         <azure.mgmt.compute.models.VirtualMachineExtensionImage>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types'
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('[VirtualMachineExtensionImage]', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def list_versions(
            self, location, publisher_name, type, filter=None, top=None, orderby=None, custom_headers=None, raw=False, **operation_config):
        """Gets a list of virtual machine extension image versions.

        :param location:
        :type location: str
        :param publisher_name:
        :type publisher_name: str
        :param type:
        :type type: str
        :param filter: The filter to apply on the operation.
        :type filter: str
        :param top:
        :type top: int
        :param orderby:
        :type orderby: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: list of :class:`VirtualMachineExtensionImage
         <azure.mgmt.compute.models.VirtualMachineExtensionImage>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions'
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
            'type': self._serialize.url("type", type, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        # OData-style options are only sent when the caller supplied them.
        query_parameters = {}
        if filter is not None:
            query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
        if top is not None:
            query_parameters['$top'] = self._serialize.query("top", top, 'int')
        if orderby is not None:
            query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('[VirtualMachineExtensionImage]', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import itertools
from oslo_serialization import jsonutils
import six
from heat.api.aws import utils as aws_utils
from heat.common import exception
from heat.common.i18n import _
from heat.engine import function
class FindInMap(function.Function):
    """A function for resolving keys in the template mappings.

    Takes the form::

        { "Fn::FindInMap" : [ "mapping", "key", "value" ] }
    """

    def __init__(self, stack, fn_name, args):
        super(FindInMap, self).__init__(stack, fn_name, args)
        try:
            self._mapname, self._mapkey, self._mapvalue = self.args
        except ValueError as ex:
            raise KeyError(six.text_type(ex))

    def result(self):
        selected_map = self.stack.t.maps[function.resolve(self._mapname)]
        map_key = function.resolve(self._mapkey)
        map_value = function.resolve(self._mapvalue)
        return selected_map[map_key][map_value]
class GetAZs(function.Function):
    """A function for retrieving the availability zones.

    Takes the form::

        { "Fn::GetAZs" : "<region>" }
    """

    def result(self):
        # TODO(therve): Implement region scoping
        # region = function.resolve(self.args)
        if self.stack is not None:
            return self.stack.get_availability_zones()
        return ['nova']
class ParamRef(function.Function):
    '''
    A function for resolving parameter references.
    Takes the form::
        { "Ref" : "<param_name>" }
    '''
    def __init__(self, stack, fn_name, args):
        super(ParamRef, self).__init__(stack, fn_name, args)
        # Keep a reference to the stack's parameter collection for result().
        self.parameters = self.stack.parameters
    def result(self):
        param_name = function.resolve(self.args)
        try:
            return self.parameters[param_name]
        except KeyError:
            # The template referenced a parameter that was never declared.
            raise exception.InvalidTemplateReference(resource=param_name,
                                                     key='unknown')
class ResourceRef(function.Function):
    """A function for resolving resource references.

    Takes the form::

        { "Ref" : "<resource_name>" }
    """

    def _resource(self, path='unknown'):
        # Translate a missing resource into a template-reference error
        # that carries the template path of the offending reference.
        referenced = function.resolve(self.args)
        try:
            return self.stack[referenced]
        except KeyError:
            raise exception.InvalidTemplateReference(resource=referenced,
                                                     key=path)

    def dependencies(self, path):
        inherited = super(ResourceRef, self).dependencies(path)
        return itertools.chain(inherited, [self._resource(path)])

    def result(self):
        return self._resource().FnGetRefId()
def Ref(stack, fn_name, args):
    """A function for resolving parameters or resource references.

    Takes the form::

        { "Ref" : "<param_name>" }

    or::

        { "Ref" : "<resource_name>" }

    Resource names shadow parameter names: if the target exists in the
    stack it is treated as a resource reference.
    """
    ref_cls = ResourceRef if args in stack else ParamRef
    return ref_cls(stack, fn_name, args)
class GetAtt(function.Function):
    '''
    A function for resolving resource attributes.
    Takes the form::
        { "Fn::GetAtt" : [ "<resource_name>",
                           "<attribute_name>" ] }
    '''
    def __init__(self, stack, fn_name, args):
        super(GetAtt, self).__init__(stack, fn_name, args)
        self._resource_name, self._attribute = self._parse_args()
    def _parse_args(self):
        # Validate the two-element [resource_name, attribute] argument list.
        try:
            resource_name, attribute = self.args
        except ValueError:
            raise ValueError(_('Arguments to "%s" must be of the form '
                               '[resource_name, attribute]') % self.fn_name)
        return resource_name, attribute
    def _resource(self, path='unknown'):
        # Look up the referenced resource, reporting a template-reference
        # error (with the template path) when it does not exist.
        resource_name = function.resolve(self._resource_name)
        try:
            return self.stack[resource_name]
        except KeyError:
            raise exception.InvalidTemplateReference(resource=resource_name,
                                                     key=path)
    def dep_attrs(self, resource_name):
        # Report which attributes of resource_name this function reads.
        if self._resource().name == resource_name:
            attrs = [function.resolve(self._attribute)]
        else:
            attrs = []
        return itertools.chain(super(GetAtt, self).dep_attrs(resource_name),
                               attrs)
    def dependencies(self, path):
        return itertools.chain(super(GetAtt, self).dependencies(path),
                               [self._resource(path)])
    def validate(self):
        super(GetAtt, self).validate()
        res = self._resource()
        attr = function.resolve(self._attribute)
        # Imported here, presumably to avoid a circular import with
        # heat.engine.resource -- TODO confirm.
        from heat.engine import resource
        # Only check against the schema when the resource type does not
        # override FnGetAtt (an override may serve dynamic attributes).
        if (type(res).FnGetAtt == resource.Resource.FnGetAtt and
                attr not in six.iterkeys(res.attributes_schema)):
            raise exception.InvalidTemplateAttribute(
                resource=self._resource_name, key=attr)
    def result(self):
        attribute = function.resolve(self._attribute)
        r = self._resource()
        # Attributes are only meaningful once the resource has reached a
        # state in which it actually exists.
        if (r.action in (r.CREATE, r.ADOPT, r.SUSPEND, r.RESUME, r.UPDATE)):
            return r.FnGetAtt(attribute)
        # NOTE(sirushtim): Add r.INIT to states above once convergence
        # is the default.
        elif r.stack.has_cache_data(r.name) and r.action == r.INIT:
            return r.FnGetAtt(attribute)
        else:
            return None
class Select(function.Function):
    '''
    A function for selecting an item from a list or map.
    Takes the form (for a list lookup)::
        { "Fn::Select" : [ "<index>", [ "<value_1>", "<value_2>", ... ] ] }
    Takes the form (for a map lookup)::
        { "Fn::Select" : [ "<index>", { "<key_1>": "<value_1>", ... } ] }
    If the selected index is not found, this function resolves to an empty
    string.
    '''
    def __init__(self, stack, fn_name, args):
        super(Select, self).__init__(stack, fn_name, args)
        try:
            self._lookup, self._strings = self.args
        except ValueError:
            raise ValueError(_('Arguments to "%s" must be of the form '
                               '[index, collection]') % self.fn_name)
    def result(self):
        index = function.resolve(self._lookup)
        try:
            # A numeric-looking index selects from a list; anything else
            # is kept as a string key for a map lookup.
            index = int(index)
        except (ValueError, TypeError):
            pass
        strings = function.resolve(self._strings)
        if strings == '':
            # an empty string is a common response from other
            # functions when result is not currently available.
            # Handle by returning an empty string
            return ''
        if isinstance(strings, six.string_types):
            # might be serialized json.
            try:
                strings = jsonutils.loads(strings)
            except ValueError as json_ex:
                fmt_data = {'fn_name': self.fn_name,
                            'err': json_ex}
                raise ValueError(_('"%(fn_name)s": %(err)s') % fmt_data)
        if isinstance(strings, collections.Mapping):
            if not isinstance(index, six.string_types):
                raise TypeError(_('Index to "%s" must be a string') %
                                self.fn_name)
            # Missing keys resolve to '' by design (see class docstring).
            return strings.get(index, '')
        if (isinstance(strings, collections.Sequence) and
                not isinstance(strings, six.string_types)):
            if not isinstance(index, six.integer_types):
                raise TypeError(_('Index to "%s" must be an integer') %
                                self.fn_name)
            try:
                return strings[index]
            except IndexError:
                # Out-of-range list indices also resolve to ''.
                return ''
        if strings is None:
            return ''
        raise TypeError(_('Arguments to %s not fully resolved') %
                        self.fn_name)
class Join(function.Function):
    """A function for joining strings.

    Takes the form::

        { "Fn::Join" : [ "<delim>", [ "<string_1>", "<string_2>", ... ] ] }

    And resolves to::

        "<string_1><delim><string_2><delim>..."
    """

    def __init__(self, stack, fn_name, args):
        super(Join, self).__init__(stack, fn_name, args)
        example = '"%s" : [ " ", [ "str1", "str2"]]' % self.fn_name
        fmt_data = {'fn_name': self.fn_name,
                    'example': example}
        if isinstance(self.args, (six.string_types, collections.Mapping)):
            raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
                              'should be: %(example)s') % fmt_data)
        try:
            self._delim, self._strings = self.args
        except ValueError:
            raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
                               'should be: %(example)s') % fmt_data)

    def result(self):
        parts = function.resolve(self._strings)
        if parts is None:
            parts = []
        if (isinstance(parts, six.string_types) or
                not isinstance(parts, collections.Sequence)):
            raise TypeError(_('"%s" must operate on a list') % self.fn_name)
        delim = function.resolve(self._delim)
        if not isinstance(delim, six.string_types):
            raise TypeError(_('"%s" delimiter must be a string') %
                            self.fn_name)
        # Nones join as empty strings; everything else must be a string.
        safe_parts = []
        for part in parts:
            if part is None:
                part = ''
            elif not isinstance(part, six.string_types):
                raise TypeError(
                    _('Items to join must be strings %s') % (repr(part)[:200]))
            safe_parts.append(part)
        return delim.join(safe_parts)
class Split(function.Function):
    '''
    A function for splitting strings.
    Takes the form::
        { "Fn::Split" : [ "<delim>", "<string_1><delim><string_2>..." ] }
    And resolves to::
        [ "<string_1>", "<string_2>", ... ]
    '''
    def __init__(self, stack, fn_name, args):
        super(Split, self).__init__(stack, fn_name, args)
        example = '"%s" : [ ",", "str1,str2"]]' % self.fn_name
        fmt_data = {'fn_name': self.fn_name,
                    'example': example}
        # A bare string or a map can never unpack into [delim, string].
        if isinstance(self.args, (six.string_types, collections.Mapping)):
            raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
                              'should be: %(example)s') % fmt_data)
        try:
            self._delim, self._strings = self.args
        except ValueError:
            raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
                               'should be: %(example)s') % fmt_data)
    def result(self):
        strings = function.resolve(self._strings)
        # The delimiter is not resolved here; it must be a literal string.
        if not isinstance(self._delim, six.string_types):
            raise TypeError(_("Delimiter for %s must be string") %
                            self.fn_name)
        if not isinstance(strings, six.string_types):
            raise TypeError(_("String to split must be string; got %s") %
                            type(strings))
        return strings.split(self._delim)
class Replace(function.Function):
    '''
    A function for performing string substitutions.
    Takes the form::
        { "Fn::Replace" : [
            { "<key_1>": "<value_1>", "<key_2>": "<value_2>", ... },
            "<key_1> <key_2>"
        ] }
    And resolves to::
        "<value_1> <value_2>"
    This is implemented using python str.replace on each key. The order in
    which replacements are performed is undefined.
    '''
    def __init__(self, stack, fn_name, args):
        super(Replace, self).__init__(stack, fn_name, args)
        self._mapping, self._string = self._parse_args()
        if not isinstance(self._mapping, collections.Mapping):
            raise TypeError(_('"%s" parameters must be a mapping') %
                            self.fn_name)
    def _parse_args(self):
        # Validate the [mapping, template-string] argument pair.
        example = ('{"%s": '
                   '[ {"$var1": "foo", "%%var2%%": "bar"}, '
                   '"$var1 is %%var2%%"]}' % self.fn_name)
        fmt_data = {'fn_name': self.fn_name,
                    'example': example}
        if isinstance(self.args, (six.string_types, collections.Mapping)):
            raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
                              'should be: %(example)s') % fmt_data)
        try:
            mapping, string = self.args
        except ValueError:
            raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
                               'should be: %(example)s') % fmt_data)
        else:
            return mapping, string
    def result(self):
        template = function.resolve(self._string)
        mapping = function.resolve(self._mapping)
        if not isinstance(template, six.string_types):
            raise TypeError(_('"%s" template must be a string') % self.fn_name)
        if not isinstance(mapping, collections.Mapping):
            raise TypeError(_('"%s" params must be a map') % self.fn_name)
        def replace(string, change):
            # Apply one (placeholder, value) substitution; values are
            # coerced to text, with None treated as an empty string.
            placeholder, value = change
            if not isinstance(placeholder, six.string_types):
                raise TypeError(_('"%s" param placeholders must be strings') %
                                self.fn_name)
            if value is None:
                value = ''
            if not isinstance(value,
                              (six.string_types, six.integer_types,
                               float, bool)):
                raise TypeError(_('"%s" params must be strings or numbers') %
                                self.fn_name)
            return string.replace(placeholder, six.text_type(value))
        # Fold every substitution over the template (order undefined).
        return six.moves.reduce(replace, six.iteritems(mapping), template)
class Base64(function.Function):
    """A placeholder function for converting to base64.

    Takes the form::

        { "Fn::Base64" : "<string>" }

    No conversion is actually performed; the function exists so that
    templates which wrap UserData in Fn::Base64 keep working, since
    Heat accepts UserData in plain text.
    """

    def result(self):
        value = function.resolve(self.args)
        if isinstance(value, six.string_types):
            return value
        raise TypeError(_('"%s" argument must be a string') % self.fn_name)
class MemberListToMap(function.Function):
    '''
    A function for converting lists containing enumerated keys and values to
    a mapping.
    Takes the form::
        { 'Fn::MemberListToMap' : [ 'Name',
                                    'Value',
                                    [ '.member.0.Name=<key_0>',
                                      '.member.0.Value=<value_0>',
                                      ... ] ] }
    And resolves to::
        { "<key_0>" : "<value_0>", ... }
    The first two arguments are the names of the key and value.
    '''
    def __init__(self, stack, fn_name, args):
        super(MemberListToMap, self).__init__(stack, fn_name, args)
        # Arguments must unpack into exactly (key name, value name, member list).
        try:
            self._keyname, self._valuename, self._list = self.args
        except ValueError:
            correct = '''
                {'Fn::MemberListToMap': ['Name', 'Value',
                ['.member.0.Name=key',
                '.member.0.Value=door']]}
                '''
            raise TypeError(_('Wrong Arguments try: "%s"') % correct)
        # Key/value names must be literal strings, since they are needed
        # before template resolution.
        if not isinstance(self._keyname, six.string_types):
            raise TypeError(_('%s Key Name must be a string') % self.fn_name)
        if not isinstance(self._valuename, six.string_types):
            raise TypeError(_('%s Value Name must be a string') % self.fn_name)
    def result(self):
        member_list = function.resolve(self._list)
        if not isinstance(member_list, collections.Iterable):
            raise TypeError(_('Member list must be a list'))
        def item(s):
            # Split '.member.N.Name=<key>' at the first '=' only, so values
            # containing '=' survive intact.
            if not isinstance(s, six.string_types):
                raise TypeError(_("Member list items must be strings"))
            return s.split('=', 1)
        partials = dict(item(s) for s in member_list)
        # Delegate the '.member.N.{Name,Value}' pairing to the shared helper.
        return aws_utils.extract_param_pairs(partials,
                                             prefix='',
                                             keyname=self._keyname,
                                             valuename=self._valuename)
class ResourceFacade(function.Function):
    '''
    A function for obtaining data from the facade resource from within the
    corresponding provider template.
    Takes the form::
        { "Fn::ResourceFacade": "<attribute_type>" }
    where the valid attribute types are "Metadata", "DeletionPolicy" and
    "UpdatePolicy".
    '''
    _RESOURCE_ATTRIBUTES = (
        METADATA, DELETION_POLICY, UPDATE_POLICY,
    ) = (
        'Metadata', 'DeletionPolicy', 'UpdatePolicy'
    )
    def __init__(self, stack, fn_name, args):
        super(ResourceFacade, self).__init__(stack, fn_name, args)
        # Reject anything but the three known attribute names up front.
        if self.args in self._RESOURCE_ATTRIBUTES:
            return
        fmt_data = {'fn_name': self.fn_name,
                    'allowed': ', '.join(self._RESOURCE_ATTRIBUTES)}
        raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
                           'should be one of: %(allowed)s') % fmt_data)
    def result(self):
        """Fetch the requested attribute from the parent (facade) resource."""
        attr = function.resolve(self.args)
        if attr == self.METADATA:
            return self.stack.parent_resource.metadata_get()
        if attr == self.UPDATE_POLICY:
            policy = self.stack.parent_resource.t.get('UpdatePolicy', {})
            return function.resolve(policy)
        if attr == self.DELETION_POLICY:
            policy = self.stack.parent_resource.t.deletion_policy()
            return function.resolve(policy)
| |
#! /usr/bin/env python
import logging
import re
import time
import requests
import json
class Slacker(object):
    # Thin wrapper around the (legacy) Slack Web API, addressed as
    # https://<slack_name>.slack.com/api/.  User and channel listings are
    # fetched once at construction time and cached on the instance.
    def __init__(self, slack_name, token, logger=None):
        """
        slack name is the short name of the slack (preceding '.slack.com')
        token should be a Slack API Token.
        """
        self.slack_name = slack_name
        self.token = token
        assert self.token, "Token should not be blank"
        self.logger = logger or logging.getLogger(__name__)
        self.url = self.api_url()
        # Prime the user and channel caches (these perform network calls).
        self.get_users()
        self.get_channels()
    def get_emojis(self):
        """Return the emoji.list payload (custom emoji) as parsed JSON."""
        url = self.url + "emoji.list?token={}".format(self.token)
        payload = requests.get(url).json()
        return payload
    def get_user(self, uid):
        """Return the users.info payload for the given user id."""
        url = self.url + "users.info?token={}&user={}".format(self.token, uid)
        payload = requests.get(url).json()
        return payload
    def get_users(self):
        """Fetch all team members and (re)build the id/name lookup caches."""
        url = self.url + "users.list?token=" + self.token
        payload = requests.get(url).json()['members']
        self.users_by_id = {x['id']: x['name'] for x in payload}
        self.users_by_name = {x['name']: x['id'] for x in payload}
        # Guest accounts: 'restricted' (multi-channel) and
        # 'ultra_restricted' (single-channel) guests are tracked separately
        # and also combined into one set.
        self.restricted_users = [x['id'] for x in payload if x.get('is_restricted')]
        self.ultra_restricted_users = [x['id'] for x in payload if x.get('is_ultra_restricted')]
        self.all_restricted_users = set(self.restricted_users + self.ultra_restricted_users)
        self.logger.debug("All restricted user names: {}".format([self.users_by_id[x] for x in self.all_restricted_users]))
        return payload
    def asciify(self, text):
        """Return text with every non-ASCII character stripped out."""
        return ''.join([x for x in list(text) if ord(x) in range(128)])
    def add_channel_markup(self, channel_name, fail_silently=True):
        """Return Slack "<#id|name>" markup for channel_name.

        Falls back to plain "#name" when the id is unknown, or None if
        fail_silently is False.
        """
        channel_id = self.get_channelid(channel_name)
        if channel_id:
            return "<#{}|{}>".format(channel_id, channel_name)
        else:
            if fail_silently:
                return "#{}".format(channel_name)
            else:
                return None
    def get_messages_in_time_range(self, oldest, cid, latest=None):
        """Return all messages in channel cid between oldest and latest.

        Pages backwards through channels.history: after each page, 'latest'
        is moved to the earliest timestamp seen so far.  The result is
        sorted oldest-first and each message is annotated with its channel
        name under the 'channel' key.
        """
        assert cid in self.channels_by_id, "Unknown channel ID {}".format(cid)
        cname = self.channels_by_id[cid]
        messages = []
        done = False
        while not done:
            murl = self.url + "channels.history?oldest={}&token={}&channel={}".format(oldest, self.token, cid)
            if latest:
                murl += "&latest={}".format(latest)
            else:
                # First page: everything up to "now".
                murl += "&latest={}".format(int(time.time()))
            payload = requests.get(murl).json()
            messages += payload['messages']
            if payload['has_more'] is False:
                done = True
                continue
            # Next request ends at the earliest timestamp seen so far.
            ts = [float(x['ts']) for x in messages]
            earliest = min(ts)
            latest = earliest
        messages.sort(key=lambda x: float(x['ts']))
        for message in messages:
            message['channel'] = cname
        return messages
    def replace_id(self, cid):
        """
        Assuming either a #channelid or @personid, replace them with #channelname or @username
        """
        stripped = cid[1:]
        first = cid[0]
        if first == "#":
            # Reverse lookup: find the channel name mapping to this id.
            m = [x for x in self.channels if self.channels[x] == stripped]
            if m:
                return "#" + m[0]
            else:
                return cid
        elif first == "@":
            # occasionally input will have the format "userid|name".
            # in case the name changed at some point,
            # lookup user by userid in users_by_id
            if "|" in stripped:
                uname_parts = stripped.split("|")
                uname = self.users_by_id[uname_parts[0]]
            else:
                uname = self.users_by_id[stripped]
            if uname:
                return "@" + uname
        # Unknown prefix or unresolvable id: return the token unchanged.
        return cid
    def detokenize(self, message):
        """Replace Slack "<...>" API tokens in message with readable names."""
        new = []
        tokens = re.split("(<.*?>)", message)
        for token in tokens:
            if len(token) > 3 and token[0] == "<" and token[-1] == ">":
                token = self.replace_id(token[1:-1])
            new.append(token)
        # NOTE(review): joining with spaces alters the original spacing
        # around tokens -- presumably acceptable for this tool; confirm.
        message = " ".join(new)
        return message
    def api_url(self):
        """Return the base Web API URL for this Slack team."""
        return "https://{}.slack.com/api/".format(self.slack_name)
    def get_channels(self, exclude_archived=True):
        """
        return a {channel_name: channel_id} dictionary
        if exclude_archived (default: True), only shows non-archived channels
        """
        channels = self.get_all_channel_objects(exclude_archived=exclude_archived)
        self.channels_by_id = {x['id']: x['name'] for x in channels}
        self.channels_by_name = {x['name']: x['id'] for x in channels}
        # Alias kept for callers (e.g. replace_id) that use self.channels.
        self.channels = self.channels_by_name
    def get_channelid(self, channel_name):
        """Return the channel id for channel_name, or None if unknown."""
        return self.channels_by_name.get(channel_name)
    def channel_exists(self, channel_name):
        """Return the channel id if channel_name exists, else None."""
        try:
            # strip leading "#" if it exists, as Slack returns all channels without them
            if channel_name[0] == "#":
                channel = channel_name[1:]
            else:
                channel = channel_name
            return self.channels_by_name[channel]
        except KeyError as e:  # channel not found
            return None
    def delete_message(self, cid, message_timestamp):
        """Delete the message at message_timestamp in channel cid.

        Returns True on success; logs and returns False on failure.
        """
        url_template = self.url + "chat.delete?token={}&channel={}&ts={}"
        url = url_template.format(self.token, cid, message_timestamp)
        ret = requests.get(url).json()
        if not ret['ok']:
            self.logger.error("Failed to delete message; error: {}".format(ret))
        return ret['ok']
    def get_channel_members_ids(self, channel_name):
        """
        returns an array of member IDs for channel_name
        """
        return self.get_channel_info(channel_name)['members']
    def channel_has_only_restricted_members(self, channel_name):
        """
        returns True if the channel only has restricted/ultra_restricted
        members, False otherwise
        """
        mids = set(self.get_channel_members_ids(channel_name))
        self.logger.debug("Current members in {} are {}".format(channel_name, mids))
        # NOTE(review): despite the name/docstring, this returns the (truthy)
        # intersection set, i.e. "has at least one restricted member", not
        # "has only restricted members" -- confirm intent with callers.
        return mids.intersection(self.all_restricted_users)
    def get_channel_member_names(self, channel_name):
        """
        returns an array of ["@member"] for members of the channel
        """
        members = self.get_channel_members_ids(channel_name)
        return ["@" + self.users_by_id[x] for x in members]
    def get_channel_info(self, channel_name):
        """
        returns JSON with channel information. Adds 'age' in seconds to JSON
        """
        url_template = self.url + "channels.info?token={}&channel={}"
        cid = self.get_channelid(channel_name)
        now = int(time.time())
        url = url_template.format(self.token, cid)
        ret = requests.get(url).json()
        if ret['ok'] is not True:
            m = "Attempted to get channel info for {}, but return was {}"
            m = m.format(channel_name, ret)
            raise RuntimeError(m)
        # Derive channel age (seconds) from its creation timestamp.
        created = ret['channel']['created']
        age = now - created
        ret['channel']['age'] = age
        return ret['channel']
    def get_all_channel_objects(self, exclude_archived=True):
        """
        return all channels
        if exclude_archived (default: True), only shows non-archived channels
        """
        url_template = self.url + "channels.list?exclude_archived={}&token={}"
        # The API expects 1/0 rather than a Python boolean.
        if exclude_archived:
            exclude_archived = 1
        else:
            exclude_archived = 0
        url = url_template.format(exclude_archived, self.token)
        request = requests.get(url)
        payload = request.json()
        assert 'channels' in payload
        return payload['channels']
    def archive(self, channel_name):
        """Archive the named channel; returns the API response payload."""
        url_template = self.url + "channels.archive?token={}&channel={}"
        cid = self.get_channelid(channel_name)
        url = url_template.format(self.token, cid)
        request = requests.get(url)
        payload = request.json()
        return payload
    def post_message(self, channel, message, message_type=None):
        """
        Posts a `message` into a `channel`.
        Optionally append an invisible attachment with 'fallback' set to `message_type`.
        Note: `channel` value should not be preceded with '#'.
        """
        assert channel # not blank
        if channel[0] == '#':
            channel = channel[1:]
        post_data = {
            'token': self.token,
            'channel': channel,
            'text': message.encode('utf-8')
        }
        if message_type:
            # NOTE(review): json.dumps(encoding=...) exists only on Python 2;
            # on Python 3 this call raises TypeError -- confirm target runtime.
            post_data['attachments'] = json.dumps([{'fallback': message_type}], encoding='utf-8')
        p = requests.post(self.url + "chat.postMessage", data=post_data)
        return p.json()
| |
#
# Copyright (c) 2009-2015 Tom Keffer <tkeffer@gmail.com>
#
# See the file LICENSE.txt for your full rights.
#
"""Engine for generating reports"""
# System imports:
import ftplib
import glob
import os.path
import shutil
import socket
import sys
import syslog
import threading
import time
import traceback
# 3rd party imports:
import configobj
# Weewx imports:
import weeutil.weeutil
from weeutil.weeutil import to_bool
import weewx.manager
#===============================================================================
# Class StdReportEngine
#===============================================================================
class StdReportEngine(threading.Thread):
    """Reporting engine for weewx.
    This engine runs zero or more reports. Each report uses a skin. A skin
    has its own configuration file specifying things such as which 'generators'
    should be run, which templates are to be used, what units are to be used, etc..
    A 'generator' is a class inheriting from class ReportGenerator, that produces the parts
    of the report, such as image plots, HTML files.
    StdReportEngine inherits from threading.Thread, so it will be run in a separate
    thread.
    See below for examples of generators.
    """
    def __init__(self, config_dict, stn_info, gen_ts=None, first_run=True):
        """Initializer for the report engine.
        config_dict: The configuration dictionary.
        stn_info: An instance of weewx.station.StationInfo, with static station information.
        gen_ts: The timestamp for which the output is to be current [Optional; default
        is the last time in the database]
        first_run: True if this is the first time the report engine has been run.
        If this is the case, then any 'one time' events should be done.
        """
        threading.Thread.__init__(self, name="ReportThread")
        self.config_dict = config_dict
        self.stn_info = stn_info
        self.gen_ts = gen_ts
        self.first_run = first_run
    def run(self):
        """This is where the actual work gets done.
        Runs through the list of reports. """
        if self.gen_ts:
            syslog.syslog(syslog.LOG_DEBUG, "reportengine: Running reports for time %s" %
                          weeutil.weeutil.timestamp_to_string(self.gen_ts))
        else:
            syslog.syslog(syslog.LOG_DEBUG, "reportengine: Running reports for latest time in the database.")
        # Iterate over each requested report
        for report in self.config_dict['StdReport'].sections:
            syslog.syslog(syslog.LOG_DEBUG, "reportengine: Running report %s" % report)
            # Figure out where the configuration file is for the skin used for
            # this report:
            skin_config_path = os.path.join(self.config_dict['WEEWX_ROOT'],
                                            self.config_dict['StdReport']['SKIN_ROOT'],
                                            self.config_dict['StdReport'][report].get('skin', 'Standard'),
                                            'skin.conf')
            # Retrieve the configuration dictionary for the skin. Wrap it in
            # a try block in case we fail
            try :
                skin_dict = configobj.ConfigObj(skin_config_path, file_error=True)
                syslog.syslog(syslog.LOG_DEBUG, "reportengine: Found configuration file %s for report %s" %
                              (skin_config_path, report))
            # NOTE: Python 2 'except E, e:' syntax -- this module targets Python 2.
            except IOError, e:
                syslog.syslog(syslog.LOG_ERR, "reportengine: Cannot read skin configuration file %s for report %s: %s" % (skin_config_path, report, e))
                syslog.syslog(syslog.LOG_ERR, "        ****  Report ignored...")
                continue
            except SyntaxError, e:
                syslog.syslog(syslog.LOG_ERR, "reportengine: Failed to read skin configuration file %s for report %s: %s" % (skin_config_path, report, e))
                syslog.syslog(syslog.LOG_ERR, "        ****  Report ignored...")
                continue
            # Add the default database binding:
            skin_dict.setdefault('data_binding', 'wx_binding')
            # If not already specified, default to logging each successful run
            skin_dict.setdefault('log_success', True)
            # Inject any overrides the user may have specified in the
            # weewx.conf configuration file for all reports:
            for scalar in self.config_dict['StdReport'].scalars:
                skin_dict[scalar] = self.config_dict['StdReport'][scalar]
            # Now inject any overrides for this specific report:
            skin_dict.merge(self.config_dict['StdReport'][report])
            # Finally, add the report name:
            skin_dict['REPORT_NAME'] = report
            # Run every generator the skin asks for, isolating failures so
            # one bad generator does not abort the rest of the report.
            for generator in weeutil.weeutil.option_as_list(skin_dict['Generators'].get('generator_list')):
                try:
                    # Instantiate an instance of the class.
                    obj = weeutil.weeutil._get_object(generator)(self.config_dict,
                                                                 skin_dict,
                                                                 self.gen_ts,
                                                                 self.first_run,
                                                                 self.stn_info)
                except Exception, e:
                    syslog.syslog(syslog.LOG_CRIT, "reportengine: Unable to instantiate generator %s." % generator)
                    syslog.syslog(syslog.LOG_CRIT, "        ****  %s" % e)
                    weeutil.weeutil.log_traceback("        ****  ")
                    syslog.syslog(syslog.LOG_CRIT, "        ****  Generator ignored...")
                    traceback.print_exc()
                    continue
                try:
                    # Call its start() method
                    obj.start()
                except Exception, e:
                    # Caught unrecoverable error. Log it, continue on to the next generator.
                    syslog.syslog(syslog.LOG_CRIT, "reportengine: Caught unrecoverable exception in generator %s" % (generator,))
                    syslog.syslog(syslog.LOG_CRIT, "        ****  %s" % str(e))
                    weeutil.weeutil.log_traceback("        ****  ")
                    syslog.syslog(syslog.LOG_CRIT, "        ****  Generator terminated...")
                    traceback.print_exc()
                    continue
                finally:
                    # Always release the generator's database resources.
                    obj.finalize()
#===============================================================================
# Class ReportGenerator
#===============================================================================
class ReportGenerator(object):
    """Base class for all report generators.

    Subclasses override run() to do the actual work; the engine calls
    start() and then finalize().
    """
    def __init__(self, config_dict, skin_dict, gen_ts, first_run, stn_info):
        self.config_dict = config_dict
        self.skin_dict = skin_dict
        self.gen_ts = gen_ts
        self.first_run = first_run
        self.stn_info = stn_info
        # Database binder is opened here and released in finalize().
        self.db_binder = weewx.manager.DBBinder(config_dict)
    def start(self):
        """Entry point called by the report engine."""
        self.run()
    def run(self):
        """Override in subclasses to generate the report parts."""
        pass
    def finalize(self):
        """Release the database connections held by the binder."""
        self.db_binder.close()
#===============================================================================
# Class FtpGenerator
#===============================================================================
class FtpGenerator(ReportGenerator):
    """Class for managing the "FTP generator".
    This will ftp everything in the public_html subdirectory to a webserver."""
    def run(self):
        import weeutil.ftpupload
        # determine how much logging is desired
        log_success = to_bool(self.skin_dict.get('log_success', True))
        t1 = time.time()
        # A skin-local HTML_ROOT overrides the global one.
        # (has_key: this module still targets Python 2.)
        if self.skin_dict.has_key('HTML_ROOT'):
            local_root = os.path.join(self.config_dict['WEEWX_ROOT'],
                                      self.skin_dict['HTML_ROOT'])
        else:
            local_root = os.path.join(self.config_dict['WEEWX_ROOT'],
                                      self.config_dict['StdReport']['HTML_ROOT'])
        try:
            ftpData = weeutil.ftpupload.FtpUpload(server = self.skin_dict['server'],
                                                  user = self.skin_dict['user'],
                                                  password = self.skin_dict['password'],
                                                  local_root = local_root,
                                                  remote_root = self.skin_dict['path'],
                                                  port = int(self.skin_dict.get('port', 21)),
                                                  name = self.skin_dict['REPORT_NAME'],
                                                  passive = to_bool(self.skin_dict.get('passive', True)),
                                                  max_tries = int(self.skin_dict.get('max_tries', 3)),
                                                  secure = to_bool(self.skin_dict.get('secure_ftp', False)),
                                                  debug = int(self.skin_dict.get('debug', 0)))
        except Exception:
            # Missing mandatory options (server, user, ...) mean FTP was
            # simply not configured for this skin; skip quietly.
            syslog.syslog(syslog.LOG_DEBUG, "reportengine: FTP upload not requested. Skipped.")
            return
        try:
            N = ftpData.run()
        except (socket.timeout, socket.gaierror, ftplib.all_errors, IOError), e:
            (cl, unused_ob, unused_tr) = sys.exc_info()
            syslog.syslog(syslog.LOG_ERR, "reportengine: Caught exception %s in FtpGenerator; %s." % (cl, e))
            weeutil.weeutil.log_traceback("        ****  ")
            return
        t2= time.time()
        if log_success:
            syslog.syslog(syslog.LOG_INFO, """reportengine: ftp'd %d files in %0.2f seconds""" % (N, (t2-t1)))
#===============================================================================
# Class RsynchGenerator
#===============================================================================
class RsyncGenerator(ReportGenerator):
    """Class for managing the "rsync generator".
    This will rsync everything in the public_html subdirectory to a webserver."""
    def run(self):
        import weeutil.rsyncupload
        # We don't try to collect performance statistics about rsync, because rsync
        # will report them for us. Check the debug log messages.
        try:
            # A skin-local HTML_ROOT overrides the global one.
            # (has_key: this module still targets Python 2.)
            if self.skin_dict.has_key('HTML_ROOT'):
                html_root = self.skin_dict['HTML_ROOT']
            else:
                html_root = self.config_dict['StdReport']['HTML_ROOT']
            rsyncData = weeutil.rsyncupload.RsyncUpload(
                local_root = os.path.join(self.config_dict['WEEWX_ROOT'], html_root),
                remote_root = self.skin_dict['path'],
                server = self.skin_dict['server'],
                user = self.skin_dict.get('user'),
                port = self.skin_dict.get('port'),
                ssh_options = self.skin_dict.get('ssh_options'),
                compress = to_bool(self.skin_dict.get('compress', False)),
                delete = to_bool(self.skin_dict.get('delete', False)),
                log_success = to_bool(self.skin_dict.get('log_success', True)))
        except Exception:
            # Missing mandatory options (path, server, ...) mean rsync was
            # simply not configured for this skin; skip quietly.
            syslog.syslog(syslog.LOG_DEBUG, "reportengine: rsync upload not requested. Skipped.")
            return
        try:
            rsyncData.run()
        except (IOError), e:
            (cl, unused_ob, unused_tr) = sys.exc_info()
            syslog.syslog(syslog.LOG_ERR, "reportengine: Caught exception %s in RsyncGenerator; %s." % (cl, e))
#===============================================================================
# Class CopyGenerator
#===============================================================================
class CopyGenerator(ReportGenerator):
    """Copies static files from the skin subdirectory to public_html.

    Patterns listed under 'copy_once' are copied only on the engine's
    first run; 'copy_always' patterns are copied on every run."""
    def run(self):
        copy_dict = self.skin_dict['CopyGenerator']
        # determine how much logging is desired
        log_success = to_bool(copy_dict.get('log_success', True))
        # Collect the glob patterns to copy; either option may be absent.
        patterns = []
        sources = ['copy_always']
        if self.first_run:
            sources.insert(0, 'copy_once')
        for key in sources:
            try:
                patterns += weeutil.weeutil.option_as_list(copy_dict[key])
            except KeyError:
                pass
        # Patterns are interpreted relative to the skin directory:
        skin_dir = os.path.join(self.config_dict['WEEWX_ROOT'],
                                self.skin_dict['SKIN_ROOT'],
                                self.skin_dict['skin'])
        os.chdir(skin_dir)
        # Figure out the destination of the files
        html_dest_dir = os.path.join(self.config_dict['WEEWX_ROOT'],
                                     self.skin_dict['HTML_ROOT'])
        ncopy = 0
        for pattern in patterns:
            # Expand any wildcards; then copy each resultant file:
            for filename in glob.glob(pattern):
                # Preserve the file's relative subdirectory under the
                # destination root:
                dest_dir = os.path.join(html_dest_dir, os.path.dirname(filename))
                try:
                    os.makedirs(dest_dir)
                except OSError:
                    # Directory already exists.
                    pass
                # shutil.copy does not preserve modification time, so the
                # copy looks new, causing it to be (for example) ftp'd to
                # the server:
                shutil.copy(filename, dest_dir)
                ncopy += 1
        if log_success:
            syslog.syslog(syslog.LOG_INFO, "reportengine: copied %d files to %s" % (ncopy, html_dest_dir))
| |
# coding=utf8
# -*- coding: utf8 -*-
# vim: set fileencoding=utf8 :
from __future__ import unicode_literals
from django.test.utils import override_settings
from django.utils.timezone import now, timedelta
from rest_framework.reverse import reverse
from rest_messaging.models import Message, NotificationCheck, Participation, Thread
from .utils import TestScenario, parse_json_response
import json
class ThreadViewTests(TestScenario):
    """Endpoint tests for the rest_messaging Thread viewset.

    Each test checks, in order: unauthenticated access (403), access to a
    thread the user does not participate in (403), then the happy path."""
    def setUp(self):
        super(ThreadViewTests, self).setUp()
        self.url = reverse('rest_messaging:threads-list')
    def test_retrieve(self):
        # no authentication
        response = self.client_unauthenticated.get("{0}{1}/".format(self.url, self.thread1.id))
        self.assertEqual(403, response.status_code)
        # no permission
        response = self.client_authenticated.get("{0}{1}/".format(self.url, self.thread_unrelated.id))
        self.assertEqual(403, response.status_code)
        # ok
        response = self.client_authenticated.get("{0}{1}/".format(self.url, self.thread1.id))
        self.assertEqual(200, response.status_code)
        self.assertEqual(set(parse_json_response(response.data)["participants"]), set([self.participant1.id, self.participant2.id, self.participant3.id]))  # we do not care about ordering
        self.assertEqual(parse_json_response(response.data)["removable_participants_ids"], [self.participant1.id])
    @override_settings(REST_MESSAGING_SERIALIZE_PARTICIPANTS_CALLBACK=lambda *args, **kwargs: ["a", "b", "c"])
    def test_retrieve_callback(self):
        # we try with a callback (full callback is tested in TestThreadSerializer, here we only test override_settings)
        response = self.client_authenticated.get("{0}{1}/".format(self.url, self.thread1.id))
        self.assertEqual(parse_json_response(response.data)["participants"], ["a", "b", "c"])
    def test_list(self):
        # not implemented
        response = self.client_unauthenticated.get(self.url)
        self.assertEqual(403, response.status_code)
        response = self.client_authenticated.get(self.url)
        self.assertEqual(405, response.status_code)
    def test_create(self):
        data = {"name": "Thread name", "participants": json.dumps([self.participant3.id, self.participant5.id])}  # self.participant1.id will be added automatically since he is request.user
        response = self.client_unauthenticated.post(self.url)
        self.assertEqual(403, response.status_code)
        # we ensure a new thread is created
        last_thread = Thread.objects.latest('id')
        response = self.client_authenticated.post(self.url, data=data)
        self.assertEqual(201, response.status_code)
        # the users have been added to the serializer
        expected = dict({"name": "Thread name", "participants": [self.participant1.id, self.participant3.id, self.participant5.id], "id": Thread.objects.latest('id').id})
        self.assertEqual(parse_json_response(response.data)["name"], expected["name"])
        self.assertEqual(parse_json_response(response.data)["id"], expected["id"])
        self.assertEqual(set(parse_json_response(response.data)["participants"]), set(expected["participants"]))  # we do not care about the order
        # the thread has been created
        new_thread = Thread.objects.latest('id')
        self.assertNotEqual(last_thread.id, new_thread.id)
        # we repost with the same participants
        # this time a new thread should not be created
        response = self.client_authenticated.post(self.url, data=data)
        self.assertEqual(201, response.status_code)
        self.assertEqual(new_thread.id, Thread.objects.latest('id').id)
    def test_update(self):
        # no authentication
        response = self.client_unauthenticated.delete(self.url)
        self.assertEqual(403, response.status_code)
        # no permission
        response = self.client_authenticated.put("{0}{1}/".format(self.url, self.thread_unrelated.id), data={})
        self.assertEqual(403, response.status_code)
        # only the name can be updated directly, not the participants (ManyToMany)
        data = {"name": "New thread name"}
        response = self.client_authenticated.put("{0}{1}/".format(self.url, self.thread1.id), data=data)
        self.assertEqual(200, response.status_code)
        # the name has been updated
        self.assertEqual(data["name"], Thread.objects.get(id=self.thread1.id).name)
        # posting participants is not allowed
        data = {"name": "Another thread name", "participants": [self.participant1.id, self.participant3.id, self.participant5.id]}
        response = self.client_authenticated.put("{0}{1}/".format(self.url, self.thread1.id), data=data)
        self.assertEqual(400, response.status_code)
    def test_delete(self):
        response = self.client_unauthenticated.delete(self.url)
        self.assertEqual(403, response.status_code)
        response = self.client_authenticated.delete(self.url)
        self.assertEqual(405, response.status_code)
    def test_add_participants(self):
        # no authentication
        response = self.client_unauthenticated.post("{0}{1}/add_participants/".format(self.url, self.thread1.id), data={})
        self.assertEqual(403, response.status_code)
        # no permission
        response = self.client_authenticated.post("{0}{1}/add_participants/".format(self.url, self.thread_unrelated.id), data={})
        self.assertEqual(403, response.status_code)
        # ok
        data = {"participants": json.dumps([self.participant1.id, self.participant3.id, self.participant5.id])}
        response = self.client_authenticated.post("{0}{1}/add_participants/".format(self.url, self.thread1.id), data=data)
        self.assertEqual(200, response.status_code)
        parsed = parse_json_response(response.data)
        self.assertEqual(set(parsed["participants"]), set([self.participant1.id, self.participant2.id, self.participant3.id, self.participant5.id]))
        self.assertEqual(parsed["name"], self.thread1.name)
    def test_remove_participant(self):
        # no authentication
        response = self.client_unauthenticated.post("{0}{1}/remove_participant/".format(self.url, self.thread1.id), data={})
        self.assertEqual(403, response.status_code)
        # no permission
        response = self.client_authenticated.post("{0}{1}/remove_participant/".format(self.url, self.thread_unrelated.id), data={})
        self.assertEqual(403, response.status_code)
        # may not remove another one excepted if the callback says so
        response = self.client_authenticated.post("{0}{1}/remove_participant/".format(self.url, self.thread1.id), data={"participant": self.participant2.id})
        self.assertEqual(400, response.status_code)
        # ok
        # we first ensure participant 1 is active
        p = Participation.objects.get(participant=self.participant1, thread=self.thread1)
        self.assertEqual(p.date_left, None)
        # we will remove him
        response = self.client_authenticated.post("{0}{1}/remove_participant/".format(self.url, self.thread1.id), data={"participant": self.participant1.id})
        self.assertEqual(200, response.status_code)
        parsed = parse_json_response(response.data)
        # we get the Thread
        self.assertEqual(parsed["name"], self.thread1.name)
        # participant 1 is still there ...
        self.assertEqual(set(parsed["participants"]), set([self.participant1.id, self.participant2.id, self.participant3.id]))
        # ... but he has left
        p = Participation.objects.get(participant=self.participant1, thread=self.thread1)
        self.assertNotEqual(p.date_left, None)
    def test_get_removable_participants_ids(self):
        # no authentication
        response = self.client_unauthenticated.get("{0}{1}/get_removable_participants_ids/".format(self.url, self.thread1.id), data={})
        self.assertEqual(403, response.status_code)
        # no permission
        response = self.client_authenticated.get("{0}{1}/get_removable_participants_ids/".format(self.url, self.thread_unrelated.id), data={})
        self.assertEqual(403, response.status_code)
        # ok
        response = self.client_authenticated.get("{0}{1}/get_removable_participants_ids/".format(self.url, self.thread1.id), data={})
        self.assertEqual(200, response.status_code)
        parsed = parse_json_response(response.data)
        self.assertEqual(parsed["participants"], [self.participant1.id])
    @override_settings(REST_MESSAGING_REMOVE_PARTICIPANTS_CALLBACK=lambda *args, **kwargs: [3, 4, 5])
    def test_get_removable_participants_ids_with_callback(self):
        # no authentication
        response = self.client_unauthenticated.get("{0}{1}/get_removable_participants_ids/".format(self.url, self.thread1.id), data={})
        self.assertEqual(403, response.status_code)
        # no permission
        response = self.client_authenticated.get("{0}{1}/get_removable_participants_ids/".format(self.url, self.thread_unrelated.id), data={})
        self.assertEqual(403, response.status_code)
        # ok: the callback's return value is used verbatim
        response = self.client_authenticated.get("{0}{1}/get_removable_participants_ids/".format(self.url, self.thread1.id), data={})
        self.assertEqual(200, response.status_code)
        parsed = parse_json_response(response.data)
        self.assertEqual(parsed["participants"], [3, 4, 5])
    def test_mark_thread_as_read(self):
        # no authentication
        response = self.client_unauthenticated.post("{0}{1}/mark_thread_as_read/".format(self.url, self.thread1.id), data={})
        self.assertEqual(403, response.status_code)
        # no permission
        response = self.client_authenticated.post("{0}{1}/mark_thread_as_read/".format(self.url, self.thread_unrelated.id), data={})
        self.assertEqual(403, response.status_code)
        # ok
        response = self.client_authenticated.post("{0}{1}/mark_thread_as_read/".format(self.url, self.thread1.id), data={})
        self.assertEqual(200, response.status_code)
        parsed = parse_json_response(response.data)
        self.assertEqual(parsed["id"], self.thread1.id)
class MessageViewTests(TestScenario):
def setUp(self):
super(MessageViewTests, self).setUp()
self.url = reverse('rest_messaging:messages-list')
def test_get_queryset(self):
# no authentication
response = self.client_unauthenticated.get(self.url)
self.assertEqual(403, response.status_code)
# ok
# participant 3 has read the 2 last messages, 1 only the first
p1 = Participation.objects.create(participant=self.participant3, thread=self.thread3)
p1.date_last_check = now() - timedelta(days=1)
p1.save()
p2 = Participation.objects.create(participant=self.participant1, thread=self.thread3)
p2.date_last_check = now() - timedelta(days=2)
p2.save()
response = self.client_authenticated.get(self.url)
self.assertEqual(200, response.status_code)
messages_dct = parse_json_response(response.data)
messages = messages_dct["results"]
self.assertEqual(3, len(messages))
self.assertEqual(messages[0]["id"], self.m33.id)
self.assertEqual(messages[1]["id"], self.m22.id)
self.assertEqual(messages[2]["id"], self.m11.id)
self.assertEqual([], messages[0]["readers"])
self.assertEqual(messages[0]["is_notification"], True) # not read
self.assertEqual(messages[1]["is_notification"], False) # because written by the user himself
self.assertEqual(messages[2]["is_notification"], False) # because written by the user himself
def test_post_message(self):
# no authentication
response = self.client_unauthenticated.post("{0}{1}/post_message/".format(self.url, self.thread1.id), data={})
self.assertEqual(403, response.status_code)
# no permission
response = self.client_authenticated.post("{0}{1}/post_message/".format(self.url, self.thread_unrelated.id), data={})
self.assertEqual(403, response.status_code)
# ok
body = "New message!"
response = self.client_authenticated.post("{0}{1}/post_message/".format(self.url, self.thread1.id), data={"body": body})
self.assertEqual(201, response.status_code)
parsed = parse_json_response(response.data)
last_message = Message.objects.latest('id')
self.assertEqual(parsed["id"], last_message.id)
self.assertTrue(parsed["body"] == last_message.body == body)
self.assertEqual(parsed["sender"], self.participant1.id)
def test_list_messages_in_thread(self):
    """Listing a thread requires auth + membership and reports per-message readers."""
    # no authentication
    response = self.client_unauthenticated.get("{0}{1}/list_messages_in_thread/".format(self.url, self.thread1.id))
    self.assertEqual(403, response.status_code)
    # no permission
    response = self.client_authenticated.get("{0}{1}/list_messages_in_thread/".format(self.url, self.thread_unrelated.id))
    self.assertEqual(403, response.status_code)
    # ok
    # participant 3 has read the 2 last messages, 1 only the first
    p1 = Participation.objects.create(participant=self.participant3, thread=self.thread3)
    p1.date_last_check = now() - timedelta(days=1)
    p1.save()
    p2 = Participation.objects.create(participant=self.participant1, thread=self.thread3)
    p2.date_last_check = now() - timedelta(days=2)
    p2.save()
    # Back-date the messages around the check dates.
    # BUG FIX: the original used chained assignments
    # (`self.m31.sent_at = p1.date_last_check = ...`) which silently clobbered
    # the in-memory `p1.date_last_check` configured above; only `sent_at`
    # should be changed here.
    self.m31.sent_at = now() - timedelta(days=3)
    self.m31.save()
    self.m32.sent_at = now() - timedelta(days=1, hours=12)
    self.m32.save()
    response = self.client_authenticated.get("{0}{1}/list_messages_in_thread/".format(self.url, self.thread3.id))
    messages_dct = parse_json_response(response.data)
    messages = messages_dct["results"]
    self.assertEqual([self.m33.id, self.m32.id, self.m31.id], [m["id"] for m in messages])
    self.assertEqual([set([]), set([self.participant3.id]), set([self.participant1.id, self.participant3.id])], [set(m["readers"]) for m in messages])
class TestMessageNotificationCheckView(TestScenario):
    """Tests for the notification "check" endpoint (marking notifications as seen)."""

    def setUp(self):
        super(TestMessageNotificationCheckView, self).setUp()
        self.url = reverse('rest_messaging:notifications-list')

    def test_check(self):
        """POSTing a check requires auth and bumps the existing check's timestamp."""
        # no authentication
        response = self.client_unauthenticated.post("{0}check/".format(self.url), data={})
        self.assertEqual(403, response.status_code)
        # participant 1 already has a check, it will be updated
        response = self.client_authenticated.post("{0}check/".format(self.url), data={})
        self.assertEqual(200, response.status_code)
        reload_notification_check = NotificationCheck.objects.get(participant=self.participant1)
        # The same row is reused (updated in place) with a later timestamp,
        # rather than a new NotificationCheck being created.
        self.assertEqual(reload_notification_check.id, self.notification_check.id)
        self.assertTrue(reload_notification_check.date_check > self.notification_check.date_check)
class TestParticipantAuthenticationView(TestScenario):
    """Tests for the endpoint exposing the currently authenticated participant."""

    def setUp(self):
        super(TestParticipantAuthenticationView, self).setUp()
        self.url = reverse('rest_messaging:authentication-list')

    def test_get(self):
        """Anonymous callers get 403; authenticated ones get only their own id."""
        resp_anonymous = self.client_unauthenticated.get(self.url)
        self.assertEqual(403, resp_anonymous.status_code)
        resp = self.client_authenticated.get(self.url)
        self.assertEqual(200, resp.status_code)
        payload = parse_json_response(resp.data)
        self.assertEqual(payload['id'], self.participant1.id)
        # The serialized participant exposes nothing beyond its id.
        self.assertEqual(len(payload.keys()), 1)
| |
"""
Script entry point
"""
from src.calrissian.particle_vector_n_network_local_conv4 import ParticleVectorNLocalConvolution4Network
from src.calrissian.layers.particle_vector_n_local_conv4 import ParticleVectorNLocalConvolution4
from src.calrissian.layers.particle_vector_n_local_conv4 import ParticleVectorNLocalConvolution4Input
import numpy as np
import time
def main():
    """Smoke-test a tiny two-layer convolutional particle network."""
    x_train = np.asarray([[0.45, 3.33], [0.0, 2.22]])
    y_train = np.asarray([[1.0, 0.0], [0.0, 1.0]])

    network = ParticleVectorNLocalConvolution4Network(
        cost="mse", particle_input=ParticleVectorNLocalConvolution4Input(2))
    network.append(ParticleVectorNLocalConvolution4(
        2, 3, activation="sigmoid", apply_convolution=True, delta_r=0.1,
        n_steps=[-1, 0, 1],
        pool_size=[2, 2, 1], pool_stride=[0.1, 0.1, 0.1]))
    network.append(ParticleVectorNLocalConvolution4(3, 2, activation="sigmoid"))

    # Print the input particle coordinates, predictions and cost for eyeballing.
    print(network.particle_input.get_rxyz())
    print(network.predict(x_train))
    print(network.cost(x_train, y_train))
def timer():
    """Benchmark: time 50 consecutive cost-gradient evaluations.

    Removed dead code from the original: the unused ``nr``/``nv``/``nw``
    locals, the pointless ``total`` accumulator, and the unused unpacking of
    the gradient tuple.
    """
    train_X = np.asarray([[0.2, -0.3], [0.1, -0.9], [0.1, 0.05], [0.01, 0.01], [0.03, 0.04]])
    train_Y = np.asarray([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0]])

    net = ParticleVectorNLocalConvolution4Network(cost="categorical_cross_entropy",
                                                  particle_input=ParticleVectorNLocalConvolution4Input(2))
    net.append(ParticleVectorNLocalConvolution4(2, 5, activation="tanh", potential="gaussian",
                                                apply_convolution=True, delta_r=0.4,
                                                pool_size=[4, 4, 1], pool_stride=[0.1, 0.1, 0.1]))
    net.append(ParticleVectorNLocalConvolution4(5, 4, activation="tanh", srl=[0.1, 0.5, 0.5]))
    net.append(ParticleVectorNLocalConvolution4(4, 3, activation="softmax"))

    ts = time.time()
    for i in range(50):
        # Gradients are discarded: only the wall-clock time matters here.
        net.cost_gradient(train_X, train_Y)
        print("Time for {}: {}".format(i + 1, time.time() - ts))
def fd():
    """Finite-difference check of the analytic gradients of a small conv network.

    Builds a fixed three-layer network, computes the analytic gradients of the
    cost, then compares each family of parameters (biases, particle positions,
    n-vectors, w-vectors) against central finite differences, printing the
    analytic and numerical values side by side for eyeball comparison.

    The original code duplicated the position / n-vector / w-vector loops
    three times (with a slightly inconsistent perturbation scheme for the
    positions); they are unified into the nested helpers below.
    """
    nr = 3  # number of position components to check
    nv = 3  # number of n-vector components to check
    nw = 3  # number of w-vector components to check
    h = 0.001  # central-difference step size

    train_X = np.asarray([[0.2, -0.3], [0.1, -0.9], [0.1, 0.05], [0.01, 0.01], [0.03, 0.04]])
    train_Y = np.asarray([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0]])

    net = ParticleVectorNLocalConvolution4Network(cost="categorical_cross_entropy",
                                                  particle_input=ParticleVectorNLocalConvolution4Input(2))
    net.append(ParticleVectorNLocalConvolution4(2, 5, activation="tanh", potential="gaussian",
                                                apply_convolution=True, delta_r=0.4,
                                                n_steps=[1, 2, 3],
                                                pool_size=[3, 3, 1], pool_stride=[0.1, 0.1, 0.1]))
    net.append(ParticleVectorNLocalConvolution4(5, 4, activation="tanh", srl=[0.1, 0.5, 0.5]))
    net.append(ParticleVectorNLocalConvolution4(4, 3, activation="softmax"))

    ts = time.time()
    net.cost(train_X, train_Y)
    db, dr, dn, dm = net.cost_gradient(train_X, train_Y)
    print("Time: {}".format(time.time() - ts))

    def numerical_gradient(attr, index):
        # Central finite difference of the cost w.r.t.
        # getattr(layer, attr)[index][i], for every particle i of every layer
        # (input layer first), mirroring the analytic gradient layout.
        rows = []
        for layer in [net.particle_input] + net.layers:
            row = []
            values = getattr(layer, attr)
            for i in range(layer.output_size):
                orig = values[index][i]
                values[index][i] = orig + h
                fp = net.cost(train_X, train_Y)
                values[index][i] = orig - h
                fm = net.cost(train_X, train_Y)
                values[index][i] = orig  # restore before the next particle
                row.append((fp - fm) / (2 * h))
            rows.append(row)
        return rows

    def compare(attr, analytic, label, count):
        # Print analytic vs. numerical gradients for components 0..count-1.
        for index in range(count):
            numerical = numerical_gradient(attr, index)
            print("analytic " + label + ": " + str(index))
            for layer_grad in analytic:
                print(layer_grad[index])
            print("numerical " + label + ": " + str(index))
            for row in numerical:
                print(row)

    # Biases: only net.layers carry biases (the input layer has none).
    print("analytic b")
    print(db)
    fd_b = []
    for layer in net.layers:
        lb = []
        for c in range(len(layer.b)):
            for b in range(len(layer.b[c])):
                orig = layer.b[c][b]
                layer.b[c][b] = orig + h
                fp = net.cost(train_X, train_Y)
                layer.b[c][b] = orig - h
                fm = net.cost(train_X, train_Y)
                layer.b[c][b] = orig
                lb.append((fp - fm) / (2 * h))
        fd_b.append(lb)
    print("numerical b")
    print(fd_b)

    compare("positions", dr, "r", nr)
    compare("nvectors", dn, "n", nv)
    compare("nwectors", dm, "m", nw)
def fd2():
    """Finite-difference gradient check for a deeper, two-convolution network.

    Same procedure as the other finite-difference check but with a larger
    dataset and a four-layer network containing two convolutional layers.
    The triplicated position / n-vector / w-vector loops of the original are
    unified into the nested helpers below.
    """
    nr = 3  # number of position components to check
    nv = 3  # number of n-vector components to check
    nw = 3  # number of w-vector components to check
    h = 0.001  # central-difference step size

    train_X = np.asarray([[0.2, -0.3], [0.1, -0.9], [0.1, 0.05], [0.01, 0.01], [0.03, 0.04], [0.03, 0.04]])
    train_Y = np.asarray([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0]])

    net = ParticleVectorNLocalConvolution4Network(cost="categorical_cross_entropy",
                                                  particle_input=ParticleVectorNLocalConvolution4Input(2))
    net.append(ParticleVectorNLocalConvolution4(2, 7, activation="tanh", potential="gaussian",
                                                apply_convolution=True, delta_r=0.4,
                                                n_steps=[0, 1, 2, 3],
                                                pool_size=[3, 3, 1], pool_stride=[0.1, 0.1, 0.1]))
    net.append(ParticleVectorNLocalConvolution4(7, 5, activation="tanh", potential="gaussian",
                                                apply_convolution=True, delta_r=0.2,
                                                n_steps=[0, 1, 2],
                                                pool_size=[2, 2, 1], pool_stride=[0.2, 0.2, 0.2]))
    net.append(ParticleVectorNLocalConvolution4(5, 4, activation="tanh", srl=[0.1, 0.5, 0.5]))
    net.append(ParticleVectorNLocalConvolution4(4, 3, activation="softmax"))

    ts = time.time()
    net.cost(train_X, train_Y)
    db, dr, dn, dm = net.cost_gradient(train_X, train_Y)
    print("Time: {}".format(time.time() - ts))

    def numerical_gradient(attr, index):
        # Central finite difference of the cost w.r.t.
        # getattr(layer, attr)[index][i], for every particle i of every layer
        # (input layer first), mirroring the analytic gradient layout.
        rows = []
        for layer in [net.particle_input] + net.layers:
            row = []
            values = getattr(layer, attr)
            for i in range(layer.output_size):
                orig = values[index][i]
                values[index][i] = orig + h
                fp = net.cost(train_X, train_Y)
                values[index][i] = orig - h
                fm = net.cost(train_X, train_Y)
                values[index][i] = orig  # restore before the next particle
                row.append((fp - fm) / (2 * h))
            rows.append(row)
        return rows

    def compare(attr, analytic, label, count):
        # Print analytic vs. numerical gradients for components 0..count-1.
        for index in range(count):
            numerical = numerical_gradient(attr, index)
            print("analytic " + label + ": " + str(index))
            for layer_grad in analytic:
                print(layer_grad[index])
            print("numerical " + label + ": " + str(index))
            for row in numerical:
                print(row)

    # Biases: only net.layers carry biases (the input layer has none).
    print("analytic b")
    print(db)
    fd_b = []
    for layer in net.layers:
        lb = []
        for c in range(len(layer.b)):
            for b in range(len(layer.b[c])):
                orig = layer.b[c][b]
                layer.b[c][b] = orig + h
                fp = net.cost(train_X, train_Y)
                layer.b[c][b] = orig - h
                fm = net.cost(train_X, train_Y)
                layer.b[c][b] = orig
                lb.append((fp - fm) / (2 * h))
        fd_b.append(lb)
    print("numerical b")
    print(fd_b)

    compare("positions", dr, "r", nr)
    compare("nvectors", dn, "n", nv)
    compare("nwectors", dm, "m", nw)
if __name__ == "__main__":
    # Ensure same seed so finite-difference runs are reproducible.
    np.random.seed(100)
    # main()
    # fd()
    fd2()
    # timer()
| |
# Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_federated.python.simulation.baselines import task_data
from tensorflow_federated.python.simulation.datasets import client_data
def create_client_data(num_clients):
  """Builds a ClientData where client `i` holds the dataset range(i + 1)."""

  def make_dataset(client_id):
    # The client id string encodes how many examples the client holds.
    count = tf.strings.to_number(client_id, out_type=tf.int64) + 1
    return tf.data.Dataset.range(count)

  ids = [str(i) for i in range(num_clients)]
  return client_data.ClientData.from_clients_and_tf_fn(ids, make_dataset)
class GetElementSpecTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for the `task_data._get_element_spec` helper."""

  @parameterized.named_parameters(
      ('federated_no_preprocess', 'Federated', False),
      ('federated_preprocess', 'Federated', True),
      ('centralized_no_preprocess', 'Centralized', False),
      ('centralized_preprocess', 'Centralized', True),
  )
  def test_get_element_spec(self, dataset_type, preprocess):
    """The inferred element spec reflects any preprocessing function."""
    data = create_client_data(10)
    if dataset_type == 'Centralized':
      data = data.create_tf_dataset_from_all_clients()

    def convert_int64_to_int32(dataset):
      return dataset.map(lambda x: tf.cast(x, tf.int32))

    if preprocess:
      preprocess_fn = convert_int64_to_int32
      # Mapping through tf.cast changes the element dtype to int32.
      expected_type = tf.TensorSpec(shape=(), dtype=tf.int32, name=None)
    else:
      preprocess_fn = None
      expected_type = tf.TensorSpec(shape=(), dtype=tf.int64, name=None)
    actual_type = task_data._get_element_spec(data, preprocess_fn)
    self.assertEqual(actual_type, expected_type)
class BaselineTaskDatasetsTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for `task_data.BaselineTaskDatasets`.

  Covers constructor validation, the train/eval preprocessing hooks, client
  sampling, centralized test-set creation, the internal dataset-information
  record, and the textual `summary` output.
  """

  def test_raises_when_train_and_test_types_are_different_no_preprocessing(
      self):
    train_data = create_client_data(10)
    # int32 centralized test data vs. int64 federated train data.
    test_data = tf.data.Dataset.range(10, output_type=tf.int32)
    with self.assertRaisesRegex(
        ValueError,
        'train and test element structures after preprocessing must be equal'):
      task_data.BaselineTaskDatasets(train_data=train_data, test_data=test_data)

  def test_raises_when_train_and_test_types_are_different_with_train_preprocessing(
      self):
    train_data = create_client_data(10)
    test_data = tf.data.Dataset.range(10)
    # Preprocessing casts train elements to int32, mismatching the test set.
    train_preprocess_fn = lambda x: x.map(lambda y: tf.cast(y, dtype=tf.int32))
    with self.assertRaisesRegex(
        ValueError,
        'train and test element structures after preprocessing must be equal'):
      task_data.BaselineTaskDatasets(
          train_data=train_data,
          train_preprocess_fn=train_preprocess_fn,
          test_data=test_data)

  def test_raises_when_train_and_test_types_are_different_with_eval_preprocessing(
      self):
    train_data = create_client_data(10)
    test_data = tf.data.Dataset.range(10)
    # Eval preprocessing casts the test elements, mismatching the train set.
    eval_preprocess_fn = lambda x: x.map(lambda y: tf.cast(y, dtype=tf.int32))
    with self.assertRaisesRegex(
        ValueError,
        'train and test element structures after preprocessing must be equal'):
      task_data.BaselineTaskDatasets(
          train_data=train_data,
          eval_preprocess_fn=eval_preprocess_fn,
          test_data=test_data)

  def test_raises_when_test_and_validation_types_are_different(self):
    train_data = create_client_data(10)
    test_data = tf.data.Dataset.range(10)
    validation_data = tf.data.Dataset.range(10, output_type=tf.int32)
    with self.assertRaisesRegex(
        ValueError,
        'validation set must be None, or have the same element type structure '
        'as the test data'):
      task_data.BaselineTaskDatasets(
          train_data=train_data,
          test_data=test_data,
          validation_data=validation_data)

  def test_constructs_without_eval_preprocess_fn(self):
    preprocess_fn = lambda x: x.map(lambda y: 2 * y)
    test_task_data = task_data.BaselineTaskDatasets(
        train_data=create_client_data(10),
        train_preprocess_fn=preprocess_fn,
        test_data=create_client_data(2))
    # The stored train preprocess fn must apply the provided mapping.
    train_preprocess_fn = test_task_data.train_preprocess_fn
    example_dataset = train_preprocess_fn(tf.data.Dataset.range(20))
    for i, x in enumerate(example_dataset):
      self.assertEqual(2 * i, x.numpy())

  def test_constructs_with_eval_preprocess_fn(self):
    train_preprocess_fn = lambda x: x.map(lambda y: 2 * y)
    eval_preprocess_fn = lambda x: x.map(lambda y: 3 * y)
    test_task_data = task_data.BaselineTaskDatasets(
        train_data=create_client_data(10),
        train_preprocess_fn=train_preprocess_fn,
        test_data=create_client_data(2),
        eval_preprocess_fn=eval_preprocess_fn)
    # The eval preprocess fn is kept separate from the train one.
    example_dataset = test_task_data.eval_preprocess_fn(
        tf.data.Dataset.range(20))
    for i, x in enumerate(example_dataset):
      self.assertEqual(3 * i, x.numpy())

  def test_sample_train_clients_returns_train_datasets(self):
    train_data = create_client_data(10)
    test_task_data = task_data.BaselineTaskDatasets(
        train_data=train_data, test_data=create_client_data(2))
    all_client_datasets = [
        train_data.create_tf_dataset_for_client(x)
        for x in train_data.client_ids
    ]
    all_client_datasets_as_lists = [
        list(ds.as_numpy_iterator()) for ds in all_client_datasets
    ]
    # Every sampled dataset must be one of the underlying client datasets.
    sampled_client_datasets = test_task_data.sample_train_clients(num_clients=3)
    for ds in sampled_client_datasets:
      ds_as_list = list(ds.as_numpy_iterator())
      self.assertIn(ds_as_list, all_client_datasets_as_lists)

  def test_sample_train_clients_returns_preprocessed_train_datasets(self):
    preprocess_fn = lambda x: x.map(lambda y: 2 * y)
    train_data = create_client_data(10)
    test_task_data = task_data.BaselineTaskDatasets(
        train_data=train_data,
        train_preprocess_fn=preprocess_fn,
        test_data=create_client_data(2))
    # Sampled datasets should match the *preprocessed* client datasets.
    preprocess_train_data = train_data.preprocess(preprocess_fn)
    all_client_datasets = [
        preprocess_train_data.create_tf_dataset_for_client(x)
        for x in preprocess_train_data.client_ids
    ]
    all_client_datasets_as_lists = [
        list(ds.as_numpy_iterator()) for ds in all_client_datasets
    ]
    sampled_client_datasets = test_task_data.sample_train_clients(num_clients=5)
    for ds in sampled_client_datasets:
      ds_as_list = list(ds.as_numpy_iterator())
      self.assertIn(ds_as_list, all_client_datasets_as_lists)

  def test_sample_train_clients_random_seed(self):
    test_task_data = task_data.BaselineTaskDatasets(
        train_data=create_client_data(100), test_data=create_client_data(2))
    client_datasets1 = test_task_data.sample_train_clients(
        num_clients=5, random_seed=0)
    data1 = [list(ds.as_numpy_iterator()) for ds in client_datasets1]
    client_datasets2 = test_task_data.sample_train_clients(
        num_clients=5, random_seed=0)
    data2 = [list(ds.as_numpy_iterator()) for ds in client_datasets2]
    client_datasets3 = test_task_data.sample_train_clients(
        num_clients=5, random_seed=1)
    data3 = [list(ds.as_numpy_iterator()) for ds in client_datasets3]
    # Same seed reproduces the sample; a different seed changes it.
    self.assertAllEqual(data1, data2)
    self.assertNotAllEqual(data1, data3)

  def test_create_centralized_test_from_client_data(self):
    test_task_data = task_data.BaselineTaskDatasets(
        train_data=create_client_data(100), test_data=create_client_data(3))
    test_data = test_task_data.get_centralized_test_data()
    # Clients 0, 1, 2 hold range(1), range(2), range(3) respectively.
    self.assertSameElements(
        list(test_data.as_numpy_iterator()), [0, 0, 0, 1, 1, 2])

  def test_create_centralized_test_from_client_data_with_eval_preprocess(self):
    eval_preprocess_fn = lambda x: x.map(lambda y: 3 * y)
    test_task_data = task_data.BaselineTaskDatasets(
        train_data=create_client_data(100),
        test_data=create_client_data(3),
        eval_preprocess_fn=eval_preprocess_fn)
    test_data = test_task_data.get_centralized_test_data()
    self.assertSameElements(
        list(test_data.as_numpy_iterator()), [0, 0, 0, 3, 3, 6])

  def test_create_centralized_test_from_dataset(self):
    test_task_data = task_data.BaselineTaskDatasets(
        train_data=create_client_data(100), test_data=tf.data.Dataset.range(7))
    test_data = test_task_data.get_centralized_test_data()
    self.assertSameElements(list(test_data.as_numpy_iterator()), list(range(7)))

  def test_create_centralized_test_from_dataset_with_eval_preprocess(self):
    eval_preprocess_fn = lambda x: x.map(lambda y: 3 * y)
    test_task_data = task_data.BaselineTaskDatasets(
        train_data=create_client_data(100),
        test_data=tf.data.Dataset.range(7),
        eval_preprocess_fn=eval_preprocess_fn)
    test_data = test_task_data.get_centralized_test_data()
    expected_data = [3 * a for a in range(7)]
    self.assertSameElements(list(test_data.as_numpy_iterator()), expected_data)

  @parameterized.named_parameters(
      ('num_clients1', 1),
      ('num_clients2', 4),
      ('num_clients3', 10),
  )
  def test_record_train_dataset_info(self, num_clients):
    test_task_data = task_data.BaselineTaskDatasets(
        train_data=create_client_data(num_clients),
        test_data=create_client_data(2))
    actual_train_info = test_task_data._record_dataset_information()['train']
    expected_train_info = ['Train', 'Federated', num_clients]
    self.assertEqual(actual_train_info, expected_train_info)

  @parameterized.named_parameters(
      ('test_config1', 'Federated', 4),
      ('test_config2', 'Federated', 15),
      ('test_config3', 'Centralized', 'N/A'),
  )
  def test_record_test_dataset_info(self, test_dataset_type, num_clients):
    if test_dataset_type == 'Federated':
      test_data = create_client_data(num_clients)
    else:
      test_data = tf.data.Dataset.range(5)
    test_task_data = task_data.BaselineTaskDatasets(
        train_data=create_client_data(1), test_data=test_data)
    actual_test_info = test_task_data._record_dataset_information()['test']
    expected_test_info = ['Test', test_dataset_type, num_clients]
    self.assertEqual(actual_test_info, expected_test_info)

  @parameterized.named_parameters(
      ('validation_config1', 'Federated', 5),
      ('validation_config2', 'Federated', 23),
      ('validation_config3', 'Centralized', 'N/A'),
      ('validation_config4', None, None),
  )
  def test_record_validation_dataset_info(self, validation_dataset_type,
                                          num_clients):
    if validation_dataset_type == 'Federated':
      validation_data = create_client_data(num_clients)
    elif validation_dataset_type == 'Centralized':
      validation_data = tf.data.Dataset.range(2)
    else:
      validation_data = None
    test_task_data = task_data.BaselineTaskDatasets(
        train_data=create_client_data(1),
        test_data=create_client_data(1),
        validation_data=validation_data)
    if validation_dataset_type is None:
      # No validation data: the record should omit the key entirely.
      self.assertNotIn('validation',
                       test_task_data._record_dataset_information())
    else:
      actual_validation_info = test_task_data._record_dataset_information(
      )['validation']
      expected_validation_info = [
          'Validation', validation_dataset_type, num_clients
      ]
      self.assertEqual(actual_validation_info, expected_validation_info)

  @parameterized.named_parameters(
      ('is_not_none', lambda x: x, True),
      ('is_none', None, False),
  )
  def test_summary_train_preprocess_fn(self, train_preprocess_fn, is_not_none):
    test_task_data = task_data.BaselineTaskDatasets(
        train_data=create_client_data(10),
        train_preprocess_fn=train_preprocess_fn,
        test_data=create_client_data(2))
    summary_list = []
    test_task_data.summary(print_fn=summary_list.append)
    expected_train_preprocess_summary = 'Train Preprocess Function: {}'.format(
        is_not_none)
    # Summary line 5 reports whether a train preprocess fn was supplied.
    self.assertEqual(summary_list[5], expected_train_preprocess_summary)

  @parameterized.named_parameters(
      ('is_not_none', lambda x: x, True),
      ('is_none', None, False),
  )
  def test_summary_eval_preprocess_fn(self, eval_preprocess_fn, is_not_none):
    test_task_data = task_data.BaselineTaskDatasets(
        train_data=create_client_data(10),
        eval_preprocess_fn=eval_preprocess_fn,
        test_data=create_client_data(2))
    summary_list = []
    test_task_data.summary(print_fn=summary_list.append)
    expected_eval_preprocess_summary = 'Eval Preprocess Function: {}'.format(
        is_not_none)
    # Summary line 6 reports whether an eval preprocess fn was supplied.
    self.assertEqual(summary_list[6], expected_eval_preprocess_summary)

  @parameterized.named_parameters(
      ('config1', 'Federated', 'Federated'),
      ('config2', 'Federated', 'Centralized'),
      ('config3', 'Centralized', 'Federated'),
      ('config4', 'Centralized', 'Centralized'),
      ('config5', 'Federated', None),
      ('config6', 'Centralized', None),
  )
  def test_data_summary_header_is_constant(self, test_type, validation_type):
    train_data = create_client_data(10)
    if test_type == 'Federated':
      test_data = create_client_data(5)
    else:
      test_data = tf.data.Dataset.range(5)
    if validation_type == 'Federated':
      validation_data = create_client_data(4)
    elif validation_type == 'Centralized':
      validation_data = tf.data.Dataset.range(7)
    else:
      validation_data = None
    test_task_data = task_data.BaselineTaskDatasets(
        train_data=train_data,
        test_data=test_data,
        validation_data=validation_data)
    data_summary = []
    test_task_data.summary(print_fn=data_summary.append)
    # The header row is identical regardless of dataset configuration.
    actual_header_values = data_summary[0].split()
    expected_header_values = [
        'Split', '|Dataset', 'Type', '|Number', 'of', 'Clients', '|'
    ]
    self.assertEqual(actual_header_values, expected_header_values)

  @parameterized.named_parameters(
      ('num_clients1', 1),
      ('num_clients2', 9),
      ('num_clients3', 7),
  )
  def test_summary_gives_correct_train_information(self, num_clients):
    train_data = create_client_data(num_clients)
    test_data = tf.data.Dataset.range(5)
    test_task_data = task_data.BaselineTaskDatasets(
        train_data=train_data, test_data=test_data)
    data_summary = []
    test_task_data.summary(print_fn=data_summary.append)
    actual_train_summary = data_summary[2].split()
    expected_train_summary = [
        'Train', '|Federated', '|{}'.format(num_clients), '|'
    ]
    self.assertEqual(actual_train_summary, expected_train_summary)

  @parameterized.named_parameters(
      ('test_config1', 'Federated', 4),
      ('test_config2', 'Federated', 15),
      ('test_config3', 'Centralized', 'N/A'),
  )
  def test_summary_gives_correct_test_information(self, test_type, num_clients):
    train_data = create_client_data(5)
    if test_type == 'Federated':
      test_data = create_client_data(num_clients)
    else:
      test_data = tf.data.Dataset.range(5)
    test_task_data = task_data.BaselineTaskDatasets(
        train_data=train_data, test_data=test_data)
    data_summary = []
    test_task_data.summary(print_fn=data_summary.append)
    actual_test_summary = data_summary[3].split()
    expected_test_summary = [
        'Test', '|{}'.format(test_type), '|{}'.format(num_clients), '|'
    ]
    self.assertEqual(actual_test_summary, expected_test_summary)

  @parameterized.named_parameters(
      ('validation_config1', 'Federated', 5),
      ('validation_config2', 'Federated', 23),
      ('validation_config3', 'Centralized', 'N/A'),
  )
  def test_summary_gives_correct_validation_information(self, validation_type,
                                                        num_clients):
    if validation_type == 'Federated':
      validation_data = create_client_data(num_clients)
    elif validation_type == 'Centralized':
      validation_data = tf.data.Dataset.range(2)
    else:
      validation_data = None
    test_task_data = task_data.BaselineTaskDatasets(
        train_data=create_client_data(1),
        test_data=create_client_data(1),
        validation_data=validation_data)
    data_summary = []
    test_task_data.summary(print_fn=data_summary.append)
    actual_validation_summary = data_summary[4].split()
    expected_validation_summary = [
        'Validation', '|{}'.format(validation_type), '|{}'.format(num_clients),
        '|'
    ]
    self.assertEqual(actual_validation_summary, expected_validation_summary)

  def test_summary_table_structure_without_validation(self):
    train_data = create_client_data(1)
    test_task_data = task_data.BaselineTaskDatasets(
        train_data=train_data, test_data=train_data)
    data_summary = []
    test_task_data.summary(print_fn=data_summary.append)
    # Header, '=' rule, two data rows, '_' rule, then two preprocess lines.
    self.assertLen(data_summary, 7)
    table_len = len(data_summary[0])
    self.assertEqual(data_summary[1], '=' * table_len)
    for i in range(2, 4):
      self.assertLen(data_summary[i], table_len)
    self.assertEqual(data_summary[4], '_' * table_len)

  def test_summary_table_structure_with_validation(self):
    train_data = create_client_data(1)
    test_task_data = task_data.BaselineTaskDatasets(
        train_data=train_data, test_data=train_data, validation_data=train_data)
    data_summary = []
    test_task_data.summary(print_fn=data_summary.append)
    # One extra row compared to the no-validation layout.
    self.assertLen(data_summary, 8)
    table_len = len(data_summary[0])
    self.assertEqual(data_summary[1], '=' * table_len)
    for i in range(2, 5):
      self.assertLen(data_summary[i], table_len)
    self.assertEqual(data_summary[5], '_' * table_len)
if __name__ == '__main__':
  # Delegate to the TensorFlow test runner (initializes absl flags, etc.).
  tf.test.main()
| |
import warnings
import math
from ...maths import *
from ..core import *
from .core import *
from .common import AcceleratableElement
class _BezierBasePathElement(AcceleratableElement):
    """
    Internal base class shared by bezier path elements.

    Parses the configuration common to all bezier elements (control polygon,
    timing, repeat count) and stores the traversal state used by subclasses.
    """

    def initialize(self, **config):
        points = [Vector2D(p) for p in config["controlPolygon"]]
        if len(points) < 2:
            raise ConfigurationError(
                "Must have at least 2 points in controlPolygon.")
        self.control_polygon = points
        # A bezier curve is parametrized on t in [0, 1], so we keep a time
        # parameter of our own rather than relying on any global clock.
        self._time = config.get("initialTime", 0)
        self.duration = config.get("duration")
        # How many times to traverse the path.
        self.repeats = config.get("repeatCount", 1)
        self._current_iteration = 0
        # State for smooth speed transitions.
        self._transition_time = 0
        self._transition_amount = 0
class BezierPathElement(_BezierBasePathElement):
    """
    A PathElement that represents motion in a bezier curve of variable degree.
    """

    def initialize(self, **config):
        super().initialize(**config)
        duration = self.duration
        if duration is not None:
            # A duration implies the speed: cover the remaining parameter
            # range (1 - t) over `duration` units of time.
            speed = (1 - self._time)/duration * globalSystem._timestep
        else:
            # Else, find the speed in the config.
            if "speed" in config:
                speed = config["speed"]
            else:
                speed = config["initialSpeed"]
        self.speed = speed

    def updateDisplacement(self):
        """
        Update this PathElement's displacement.
        """
        self.displacement = bezier(self.control_polygon, self._time)
        self._time += self.speed
        # Transition the speed (if necessary)
        self._transition()
        if self.duration is not None:
            # BUG FIX: the original condition read
            #   `self._time >= 1 or self._time <= 0 and self._current_iteration < self.repeats`
            # where `and` binds tighter than `or`, so `self._time >= 1` alone
            # started a new iteration even when all repeats were exhausted.
            # Intended: the parameter ran off either end AND repeats remain.
            if (self._time >= 1 or self._time <= 0) and self._current_iteration < self.repeats:
                self._current_iteration += 1
                # Reversing the control polygon reverses the direction of traversal of
                # the bezier curve.
                self.control_polygon = self.control_polygon[::-1]
                self._time = 0
            # If we have completed all iterations.
            if self._current_iteration == self.repeats:
                self.done = True
# class ReparametrizableBezierPathElement(_BezierBasePathElement):
# def initialize(self, **config):
# super().initialize(**config)
# if self.duration is None:
# # This is really arbitrary. Figure out what you want to do here.
# self.duration = 1
# duration = self.duration
# arclength = bezierArclength(self.control_polygon)
# self.arclength = arclength
# reparametrization = config.get("reparametrization")
# if reparametrization == "fixed":
# # reparametrization = lambda t : 10*math.sin(t/10)
# reparametrization = lambda t : 1
# print("Here's some shit.")
# print()
# for i in range(100):
# print(bezierDerivative(self.control_polygon, i/100).magnitude())
# print()
# # Normalize the reparametrzation so that its integral is equal to the
# # arclength of the bezier curve.
# #
# # Here's a piece of confusing math. In ``normalizeParametrization`` the second
# # parameter is the arclength. So why wouldn't we want to send the arclength of
# # the bezier curve in as that parameter? Well, I'm not entirely sure.
# reparametrization = normalizeParametrization(reparametrization, 0, 1)
# step_size = arclength / duration * globalSystem._timestep
# self.reparametrization = BezierReparametrizer(
# self.control_polygon, reparametrization, step_size,
# initial=bezierArclength(self.control_polygon, self._time)
# )
# def stop(self):
# """Completely hault all motion."""
# self.reparametrization.step_size = 0
# self._transition_time = 0
# def setSpeed(self, speed):
# """Set a new speed."""
# self.reparametrization.step_size = speed * self.arclength / self.duration \
# * globalSystem._timestep
# self._transition_time = 0
# def transitionToSpeed(self, new_speed, time):
# """Smoothly transition to a new speed over a given period of time."""
# new_speed = new_speed * self.arclength / self.duration * globalSystem._timestep
# self._transition_amount = (
# new_speed - self.speed) / time * globalSystem._timestep
# self._transition_time = time
# def _transition(self):
# """
# Update the speed if we are in the middle of transitioning.
# """
# if self._transition_time > 1e-9: # Accounts for floating point errors
# self.reparametrization.step_size += self._transition_amount
# self._transition_time -= globalSystem._timestep
# def updateDisplacement(self):
# """
# Update this PathElement's displacement.
# """
# self.displacement = self.reparametrization.getNext()
# # self._time += self.reparametrization._yn
# # Transition the speed (if necessary)
# self._transition()
# # If we have completed a single iteration.
# if self._time >= 1 or self._time < 0:
# self._current_iteration += 1
# # Reversing the control polygon reverses the direction of traversal of
# # the bezier curve.
# # self.reparametrization.reverse()
# # self._time = 0
# # If we have completed all iterations.
# if self._current_iteration == self.repeats:
# self.done = True
class CompositeBezierPathElement(PathElement):
    """Path element that moves along a piecewise cubic Bezier curve.

    Segment ``i`` of the curve runs from ``controlPolygon[i]`` to
    ``controlPolygon[i + 1]``; its two inner control points are offset from
    the endpoints by the matching entries of ``weightPolygon``.
    """
    def initialize(self, **config):
        """Read the curve definition and traversal options from *config*.

        Required keys: ``controlPolygon`` (>= 2 points), ``weightPolygon``
        (same length as the control polygon) and ``duration``.  Optional:
        ``fixedSpeed`` (constant-arclength traversal; expensive) and
        ``repeatCount``.

        :raises ConfigurationError: on a malformed polygon specification.
        """
        self.control_polygon = list(map(Vector2D, config["controlPolygon"]))
        if len(self.control_polygon) < 2:
            raise ConfigurationError(
                "Must have at least 2 points in controlPolygon.")
        self.weight_polygon = list(map(Vector2D, config["weightPolygon"]))
        if len(self.weight_polygon) != len(self.control_polygon):
            raise ConfigurationError(
                "weightPolygon and controlPolygon must have the same number of points.")
        self.duration = config["duration"]
        # The curve parameter runs over [0, 1] per segment; there are
        # len(control_polygon) - 1 segments in total.
        self._max_time = len(self.control_polygon) - 1
        # NOTE(review): if both operands are ints this is floor division on
        # Python 2 -- confirm duration is always a float.
        self._speed = self._max_time/self.duration * globalSystem._timestep
        self._time = 0
        self._origin = Vector2D.origin
        # 1-based index of the segment currently being traversed.
        self._current_bezier_num = 1
        # Control points of the current cubic segment.  (Fix: dropped the
        # unused ``current_bezier`` local alias.)
        self._current_bezier = [
            self.control_polygon[0],
            self.control_polygon[0] + self.weight_polygon[0],
            self.control_polygon[1] - self.weight_polygon[1],
            self.control_polygon[1]
        ]
        fixed_speed = config.get("fixedSpeed", False)
        if fixed_speed:
            warnings.warn(
                "Fixed speed traversal of bezier curves is " \
                "an expensive operation. Do not use in excess.")
            # Arclength to cover per timestep for constant-speed motion.
            self._distance_increment = compositeBezierArclength(
                self.control_polygon, self.weight_polygon) \
                / self.duration * globalSystem._timestep
            # Runge-Kutta ODE solver.
            self._rkode = inverseBezier(self._current_bezier, step_size=self._distance_increment)
        self.fixed_speed = fixed_speed
        self.repeats = config.get("repeatCount", 1)
        self._current_iteration = 0
        self._reverse = False
    def setOrigin(self, origin):
        """Set the world-space origin the displacement is measured from."""
        self._origin = origin
    def getDisplacement(self):
        """Return the current curve position plus the origin, then advance
        the traversal state by one timestep.

        ``done`` / ``_displacement`` follow the PathElement protocol --
        once all repeats finish the last displacement is returned forever.
        """
        if self.done:
            return self._displacement + self._origin
        self._displacement = bezier(self._current_bezier, self._time)
        # Advance the segment parameter for the next call.
        if self.fixed_speed:
            self._time = self._rkode.getNext()
        else:
            if self._reverse:
                self._time -= self._speed
            else:
                self._time += self._speed
        # Did we step off either end of the current segment?
        if (self._time < 0 and self._reverse) or (self._time > 1 and not self._reverse):
            if self._reverse:
                self._current_bezier_num -= 1
            else:
                self._current_bezier_num += 1
            if self._current_bezier_num == len(self.control_polygon) or self._current_bezier_num == 0:
                # Ran off the whole composite curve: one iteration done;
                # turn around and step back onto the segment we just left.
                self._current_iteration += 1
                self._reverse = not self._reverse
                if self._current_iteration == self.repeats:
                    self.done = True
                if self._reverse:
                    self._current_bezier_num -= 1
                else:
                    self._current_bezier_num += 1
                if self.fixed_speed:
                    self._rkode.reverse()
            else:
                # Crossed into a neighbouring segment: rebuild its control
                # points.  (Fix: dropped the unused ``current_bezier`` alias.)
                n = self._current_bezier_num - 1
                self._current_bezier = [
                    self.control_polygon[n],
                    self.control_polygon[n] + self.weight_polygon[n],
                    self.control_polygon[n + 1] - self.weight_polygon[n + 1],
                    self.control_polygon[n + 1]
                ]
                if self.fixed_speed:
                    if self._reverse:
                        self._rkode = inverseBezier(
                            self._current_bezier, step_size=-self._distance_increment, initial=1)
                    else:
                        self._rkode = inverseBezier(
                            self._current_bezier, step_size=self._distance_increment)
            # Restart the parameter at the near end of the (new) segment.
            if self._reverse:
                self._time = 1
            else:
                self._time = 0
        return self._displacement + self._origin
| |
import unittest, doctest
from test import test_support
from collections import namedtuple
import pickle, cPickle, copy
from collections import Hashable, Iterable, Iterator
from collections import Sized, Container, Callable
from collections import Set, MutableSet
from collections import Mapping, MutableMapping
from collections import Sequence, MutableSequence
# Defined at module level so pickle/cPickle can locate the class by name.
TestNT = namedtuple('TestNT', 'x y z')    # type used for pickle tests
class TestNamedTuple(unittest.TestCase):
    """Tests for the collections.namedtuple factory and its generated types."""
    def test_factory(self):
        Point = namedtuple('Point', 'x y')
        self.assertEqual(Point.__name__, 'Point')
        self.assertEqual(Point.__doc__, 'Point(x, y)')
        self.assertEqual(Point.__slots__, ())
        self.assertEqual(Point.__module__, __name__)
        self.assertEqual(Point.__getitem__, tuple.__getitem__)
        self.assertEqual(Point._fields, ('x', 'y'))
        self.assertRaises(ValueError, namedtuple, 'abc%', 'efg ghi')       # type has non-alpha char
        self.assertRaises(ValueError, namedtuple, 'class', 'efg ghi')      # type has keyword
        self.assertRaises(ValueError, namedtuple, '9abc', 'efg ghi')       # type starts with digit
        self.assertRaises(ValueError, namedtuple, 'abc', 'efg g%hi')       # field with non-alpha char
        self.assertRaises(ValueError, namedtuple, 'abc', 'abc class')      # field has keyword
        self.assertRaises(ValueError, namedtuple, 'abc', '8efg 9ghi')      # field starts with digit
        self.assertRaises(ValueError, namedtuple, 'abc', '_efg ghi')       # field with leading underscore
        self.assertRaises(ValueError, namedtuple, 'abc', 'efg efg ghi')    # duplicate field
        namedtuple('Point0', 'x1 y2')   # Verify that numbers are allowed in names
        namedtuple('_', 'a b c')        # Test leading underscores in a typename
        nt = namedtuple('nt', u'the quick brown fox')  # check unicode input
        self.assert_("u'" not in repr(nt._fields))
        nt = namedtuple('nt', (u'the', u'quick'))  # check unicode input
        self.assert_("u'" not in repr(nt._fields))
        self.assertRaises(TypeError, Point._make, [11])           # catch too few args
        self.assertRaises(TypeError, Point._make, [11, 22, 33])   # catch too many args
    def test_instance(self):
        Point = namedtuple('Point', 'x y')
        p = Point(11, 22)
        self.assertEqual(p, Point(x=11, y=22))
        self.assertEqual(p, Point(11, y=22))
        self.assertEqual(p, Point(y=22, x=11))
        self.assertEqual(p, Point(*(11, 22)))
        self.assertEqual(p, Point(**dict(x=11, y=22)))
        self.assertRaises(TypeError, Point, 1)          # too few args
        self.assertRaises(TypeError, Point, 1, 2, 3)    # too many args
        self.assertRaises(TypeError, eval, 'Point(XXX=1, y=2)', locals())   # wrong keyword argument
        self.assertRaises(TypeError, eval, 'Point(x=1)', locals())          # missing keyword argument
        self.assertEqual(repr(p), 'Point(x=11, y=22)')
        self.assert_('__dict__' not in dir(p))                              # verify instance has no dict
        self.assert_('__weakref__' not in dir(p))
        self.assertEqual(p, Point._make([11, 22]))                          # test _make classmethod
        self.assertEqual(p._fields, ('x', 'y'))                             # test _fields attribute
        self.assertEqual(p._replace(x=1), (1, 22))                          # test _replace method
        self.assertEqual(p._asdict(), dict(x=11, y=22))                     # test _asdict method
        try:
            p._replace(x=1, error=2)
        except ValueError:
            pass
        else:
            # Fix: ``self._fail`` is not a TestCase method and would raise
            # AttributeError instead of failing the test cleanly.
            self.fail('Did not detect an incorrect fieldname')
        # verify that field string can have commas
        Point = namedtuple('Point', 'x, y')
        p = Point(x=11, y=22)
        self.assertEqual(repr(p), 'Point(x=11, y=22)')
        # verify that fieldspec can be a non-string sequence
        Point = namedtuple('Point', ('x', 'y'))
        p = Point(x=11, y=22)
        self.assertEqual(repr(p), 'Point(x=11, y=22)')
    def test_tupleness(self):
        Point = namedtuple('Point', 'x y')
        p = Point(11, 22)
        self.assert_(isinstance(p, tuple))
        self.assertEqual(p, (11, 22))                                       # matches a real tuple
        self.assertEqual(tuple(p), (11, 22))                                # coercable to a real tuple
        self.assertEqual(list(p), [11, 22])                                 # coercable to a list
        self.assertEqual(max(p), 22)                                        # iterable
        self.assertEqual(max(*p), 22)                                       # star-able
        x, y = p
        self.assertEqual(p, (x, y))                                         # unpacks like a tuple
        self.assertEqual((p[0], p[1]), (11, 22))                            # indexable like a tuple
        self.assertRaises(IndexError, p.__getitem__, 3)
        self.assertEqual(p.x, x)
        self.assertEqual(p.y, y)
        self.assertRaises(AttributeError, eval, 'p.z', locals())
    def test_odd_sizes(self):
        Zero = namedtuple('Zero', '')
        self.assertEqual(Zero(), ())
        self.assertEqual(Zero._make([]), ())
        self.assertEqual(repr(Zero()), 'Zero()')
        self.assertEqual(Zero()._asdict(), {})
        self.assertEqual(Zero()._fields, ())
        Dot = namedtuple('Dot', 'd')
        self.assertEqual(Dot(1), (1,))
        self.assertEqual(Dot._make([1]), (1,))
        self.assertEqual(Dot(1).d, 1)
        self.assertEqual(repr(Dot(1)), 'Dot(d=1)')
        self.assertEqual(Dot(1)._asdict(), {'d':1})
        self.assertEqual(Dot(1)._replace(d=999), (999,))
        self.assertEqual(Dot(1)._fields, ('d',))
        # Stress-test a namedtuple with thousands of random field names.
        n = 5000
        import string, random
        names = list(set(''.join([random.choice(string.ascii_letters)
                                  for j in range(10)]) for i in range(n)))
        n = len(names)
        Big = namedtuple('Big', names)
        b = Big(*range(n))
        self.assertEqual(b, tuple(range(n)))
        self.assertEqual(Big._make(range(n)), tuple(range(n)))
        for pos, name in enumerate(names):
            self.assertEqual(getattr(b, name), pos)
        repr(b)                                 # make sure repr() doesn't blow-up
        d = b._asdict()
        d_expected = dict(zip(names, range(n)))
        self.assertEqual(d, d_expected)
        b2 = b._replace(**dict([(names[1], 999),(names[-5], 42)]))
        b2_expected = range(n)
        b2_expected[1] = 999
        b2_expected[-5] = 42
        self.assertEqual(b2, tuple(b2_expected))
        self.assertEqual(b._fields, tuple(names))
    def test_pickle(self):
        p = TestNT(x=10, y=20, z=30)
        for module in pickle, cPickle:
            loads = getattr(module, 'loads')
            dumps = getattr(module, 'dumps')
            for protocol in -1, 0, 1, 2:
                q = loads(dumps(p, protocol))
                self.assertEqual(p, q)
                self.assertEqual(p._fields, q._fields)
    def test_copy(self):
        p = TestNT(x=10, y=20, z=30)
        for copier in copy.copy, copy.deepcopy:
            q = copier(p)
            self.assertEqual(p, q)
            self.assertEqual(p._fields, q._fields)
class TestOneTrickPonyABCs(unittest.TestCase):
    """Virtual-subclass checks for the single-method ABCs in collections.

    NOTE(review): this module is Python 2 only -- ``(lambda: (yield))()``
    is a SyntaxError on modern Python 3.
    """
    def test_Hashable(self):
        # Check some non-hashables
        non_samples = [list(), set(), dict()]
        for x in non_samples:
            self.failIf(isinstance(x, Hashable), repr(x))
            self.failIf(issubclass(type(x), Hashable), repr(type(x)))
        # Check some hashables
        samples = [None,
                   int(), float(), complex(),
                   str(),
                   tuple(), frozenset(),
                   int, list, object, type,
                   ]
        for x in samples:
            self.failUnless(isinstance(x, Hashable), repr(x))
            self.failUnless(issubclass(type(x), Hashable), repr(type(x)))
        self.assertRaises(TypeError, Hashable)
        # Check direct subclassing
        class H(Hashable):
            def __hash__(self):
                return super(H, self).__hash__()
            __eq__ = Hashable.__eq__ # Silence Py3k warning
        self.assertEqual(hash(H()), 0)
        self.failIf(issubclass(int, H))
    def test_Iterable(self):
        # Check some non-iterables
        non_samples = [None, 42, 3.14, 1j]
        for x in non_samples:
            self.failIf(isinstance(x, Iterable), repr(x))
            self.failIf(issubclass(type(x), Iterable), repr(type(x)))
        # Check some iterables
        samples = [str(),
                   tuple(), list(), set(), frozenset(), dict(),
                   dict().keys(), dict().items(), dict().values(),
                   (lambda: (yield))(),
                   (x for x in []),
                   ]
        for x in samples:
            self.failUnless(isinstance(x, Iterable), repr(x))
            self.failUnless(issubclass(type(x), Iterable), repr(type(x)))
        # Check direct subclassing
        class I(Iterable):
            def __iter__(self):
                return super(I, self).__iter__()
        self.assertEqual(list(I()), [])
        self.failIf(issubclass(str, I))
    def test_Iterator(self):
        non_samples = [None, 42, 3.14, 1j, "".encode('ascii'), "", (), [],
                       {}, set()]
        for x in non_samples:
            self.failIf(isinstance(x, Iterator), repr(x))
            self.failIf(issubclass(type(x), Iterator), repr(type(x)))
        samples = [iter(str()),
                   iter(tuple()), iter(list()), iter(dict()),
                   iter(set()), iter(frozenset()),
                   iter(dict().keys()), iter(dict().items()),
                   iter(dict().values()),
                   (lambda: (yield))(),
                   (x for x in []),
                   ]
        for x in samples:
            self.failUnless(isinstance(x, Iterator), repr(x))
            self.failUnless(issubclass(type(x), Iterator), repr(type(x)))
    def test_Sized(self):
        non_samples = [None, 42, 3.14, 1j,
                       (lambda: (yield))(),
                       (x for x in []),
                       ]
        for x in non_samples:
            self.failIf(isinstance(x, Sized), repr(x))
            self.failIf(issubclass(type(x), Sized), repr(type(x)))
        samples = [str(),
                   tuple(), list(), set(), frozenset(), dict(),
                   dict().keys(), dict().items(), dict().values(),
                   ]
        for x in samples:
            self.failUnless(isinstance(x, Sized), repr(x))
            self.failUnless(issubclass(type(x), Sized), repr(type(x)))
    def test_Container(self):
        non_samples = [None, 42, 3.14, 1j,
                       (lambda: (yield))(),
                       (x for x in []),
                       ]
        for x in non_samples:
            self.failIf(isinstance(x, Container), repr(x))
            self.failIf(issubclass(type(x), Container), repr(type(x)))
        # NOTE: dict().values() is deliberately absent -- py2 value views
        # do not support __contains__.
        samples = [str(),
                   tuple(), list(), set(), frozenset(), dict(),
                   dict().keys(), dict().items(),
                   ]
        for x in samples:
            self.failUnless(isinstance(x, Container), repr(x))
            self.failUnless(issubclass(type(x), Container), repr(type(x)))
    def test_Callable(self):
        non_samples = [None, 42, 3.14, 1j,
                       "", "".encode('ascii'), (), [], {}, set(),
                       (lambda: (yield))(),
                       (x for x in []),
                       ]
        for x in non_samples:
            self.failIf(isinstance(x, Callable), repr(x))
            self.failIf(issubclass(type(x), Callable), repr(type(x)))
        samples = [lambda: None,
                   type, int, object,
                   len,
                   list.append, [].append,
                   ]
        for x in samples:
            self.failUnless(isinstance(x, Callable), repr(x))
            self.failUnless(issubclass(type(x), Callable), repr(type(x)))
    def test_direct_subclassing(self):
        # An empty direct subclass is a subclass, but int never becomes one.
        for B in Hashable, Iterable, Iterator, Sized, Container, Callable:
            class C(B):
                pass
            self.failUnless(issubclass(C, B))
            self.failIf(issubclass(int, C))
    def test_registration(self):
        # register() turns an unrelated new-style class into a virtual subclass.
        for B in Hashable, Iterable, Iterator, Sized, Container, Callable:
            class C:
                __metaclass__ = type
                __hash__ = None  # Make sure it isn't hashable by default
            self.failIf(issubclass(C, B), B.__name__)
            B.register(C)
            self.failUnless(issubclass(C, B))
class TestCollectionABCs(unittest.TestCase):
    """Virtual inheritance checks for the container ABCs in collections."""
    # XXX For now, we only test some virtual inheritance properties.
    # We should also test the proper behavior of the collection ABCs
    # as real base classes or mix-in classes.
    def test_Set(self):
        for factory in (set, frozenset):
            self.failUnless(issubclass(factory, Set))
            self.failUnless(isinstance(factory(), Set))
    def test_hash_Set(self):
        # Two Set subclasses with equal contents must hash equal via _hash().
        class OneTwoThreeSet(Set):
            def __init__(self):
                self.contents = [1, 2, 3]
            def __contains__(self, x):
                return x in self.contents
            def __len__(self):
                return len(self.contents)
            def __iter__(self):
                return iter(self.contents)
            def __hash__(self):
                return self._hash()
        first, second = OneTwoThreeSet(), OneTwoThreeSet()
        self.failUnless(hash(first) == hash(second))
    def test_MutableSet(self):
        self.failUnless(issubclass(set, MutableSet))
        self.failUnless(isinstance(set(), MutableSet))
        self.failIf(issubclass(frozenset, MutableSet))
        self.failIf(isinstance(frozenset(), MutableSet))
    def test_Mapping(self):
        for factory in (dict,):
            self.failUnless(isinstance(factory(), Mapping))
            self.failUnless(issubclass(factory, Mapping))
    def test_MutableMapping(self):
        for factory in (dict,):
            self.failUnless(isinstance(factory(), MutableMapping))
            self.failUnless(issubclass(factory, MutableMapping))
    def test_Sequence(self):
        for factory in (tuple, list, str):
            self.failUnless(isinstance(factory(), Sequence))
            self.failUnless(issubclass(factory, Sequence))
        self.failUnless(issubclass(basestring, Sequence))
    def test_MutableSequence(self):
        for factory in (tuple, str):
            self.failIf(isinstance(factory(), MutableSequence))
            self.failIf(issubclass(factory, MutableSequence))
        for factory in (list,):
            self.failUnless(isinstance(factory(), MutableSequence))
            self.failUnless(issubclass(factory, MutableSequence))
        self.failIf(issubclass(basestring, MutableSequence))
import doctest, collections
def test_main(verbose=None):
    """Run the namedtuple/ABC unit tests plus the collections doctests."""
    doc_suite = doctest.DocTestSuite(module=collections)
    suites = [TestNamedTuple, doc_suite, TestOneTrickPonyABCs, TestCollectionABCs]
    test_support.run_unittest(*suites)
    test_support.run_doctest(collections, verbose)
# Allow running this test module directly, outside the regrtest driver.
if __name__ == "__main__":
    test_main(verbose=True)
| |
#!/usr/bin/python
#
# Copyright 2015 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import contextlib
import gettext
import json
import logging
import os
import os.path
import socket
import ssl
import sys
import urllib2
import urlparse
if sys.version_info[0] < 3:
from httplib import HTTPSConnection
from urllib2 import HTTPSHandler
from urllib2 import build_opener
else:
from http.client import HTTPSConnection
from urllib.request import HTTPSHandler
from urllib.request import build_opener
import ovirt_vmconsole_conf as config
from ovirt_engine import configfile, service, ticket
# HTTP status code the engine servlet returns on success.
_HTTP_STATUS_CODE_SUCCESS = 200
# Name of the logger used throughout this helper.
_LOGGER_NAME = 'ovirt.engine.vmconsole.helper'
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-vmconsole-helper')
def urlopen(url, ca_certs=None, verify_host=True):
    """Open *url* over HTTPS and return a closing context manager.

    Uses ``ssl.create_default_context`` when the ssl module provides it;
    otherwise falls back to a hand-rolled HTTPSConnection/HTTPSHandler
    pair that verifies the peer's commonName manually.

    :param url: URL string or Request object to open.
    :param ca_certs: optional CA bundle path; when given, peer certs are
        required and checked against it, otherwise no cert is required.
    :param verify_host: whether to check the peer host name.
    """
    if getattr(ssl, 'create_default_context', None):
        context = ssl.create_default_context()
        if verify_host:
            # NOTE(review): check_hostname is documented as a boolean;
            # assigning ssl.match_hostname only "works" because a function
            # object is truthy -- this was probably meant to be ``True``.
            context.check_hostname = ssl.match_hostname
        else:
            context.check_hostname = None
        if ca_certs:
            context.load_verify_locations(cafile=ca_certs)
            context.verify_mode = ssl.CERT_REQUIRED
        else:
            # NOTE(review): on modern ssl modules, setting CERT_NONE while
            # check_hostname is enabled raises ValueError -- confirm the
            # verify_host=True / no-CA combination is never used.
            context.verify_mode = ssl.CERT_NONE
        return contextlib.closing(
            build_opener(HTTPSHandler(context=context)).open(url)
        )
    else:
        # Fallback for Pythons without create_default_context: wrap the
        # socket manually and compare the certificate CN ourselves.
        class MyHTTPSConnection(HTTPSConnection):
            def __init__(self, host, **kwargs):
                self._ca_certs = kwargs.pop('ca_certs', None)
                HTTPSConnection.__init__(self, host, **kwargs)
            def connect(self):
                self.sock = ssl.wrap_socket(
                    socket.create_connection((self.host, self.port)),
                    cert_reqs=(
                        ssl.CERT_REQUIRED if self._ca_certs
                        else ssl.CERT_NONE
                    ),
                    ca_certs=self._ca_certs,
                )
                if verify_host:
                    # Find the certificate's commonName and make sure it
                    # matches the host we intended to reach.
                    cert = self.sock.getpeercert()
                    for field in cert.get('subject', []):
                        if field[0][0] == 'commonName':
                            expected = field[0][1]
                            break
                    else:
                        raise RuntimeError(
                            _('No CN in peer certificate')
                        )
                    if expected != self.host:
                        raise RuntimeError(
                            _(
                                "Invalid host '{host}' "
                                "expected '{expected}'"
                            ).format(
                                expected=expected,
                                host=self.host,
                            )
                        )
        class MyHTTPSHandler(HTTPSHandler):
            def __init__(self, ca_certs=None):
                HTTPSHandler.__init__(self)
                self._ca_certs = ca_certs
            def https_open(self, req):
                return self.do_open(self._get_connection, req)
            def _get_connection(self, host, timeout):
                return MyHTTPSConnection(
                    host=host,
                    timeout=timeout,
                    ca_certs=self._ca_certs,
                )
        return contextlib.closing(
            build_opener(MyHTTPSHandler(ca_certs=ca_certs)).open(url)
        )
def make_ticket_encoder(cfg_file):
    """Build a TicketEncoder from the configured token certificate and key."""
    cert_path = cfg_file.get('TOKEN_CERTIFICATE')
    key_path = cfg_file.get('TOKEN_KEY')
    return ticket.TicketEncoder(cert_path, key_path)
def parse_args():
    """Parse the helper's command line.

    Two subcommands exist: ``consoles`` (list consoles available to an
    entity) and ``keys`` (list public keys, optionally filtered).
    """
    parser = argparse.ArgumentParser(
        description='ovirt-vmconsole-proxy helper tool')
    parser.add_argument(
        '--debug', default=False, action='store_true',
        help='enable debug log',
    )
    parser.add_argument(
        '--version', metavar='V', type=int, nargs='?', default=1,
        help='version of the protocol to use',
    )
    subparsers = parser.add_subparsers(
        dest='entity',
        help='subcommand help',
    )
    consoles_cmd = subparsers.add_parser(
        'consoles',
        help='list available consoles',
    )
    consoles_cmd.add_argument(
        '--entityid', nargs='?', type=str, default='',
        help='entity ID where needed',
    )
    keys_cmd = subparsers.add_parser(
        'keys',
        help='list available keys',
    )
    # The three key filters share shape: optional string, empty by default.
    for option, description in (
        ('--keyfp', 'list only the keys matching the given fingerprint'),
        ('--keytype',
         'list only the keys matching the given key type (e.g. ssh-rsa)'),
        ('--keycontent', 'list only the keys matching the given content'),
    ):
        keys_cmd.add_argument(
            option, nargs='?', type=str, default='', help=description,
        )
    return parser.parse_args()
def make_request(args):
    """Build the JSON-serializable request body for the engine servlet.

    :param args: parsed command line (see ``parse_args``).
    :returns: dict ready for json.dumps().
    :raises ValueError: for an unknown entity or a missing entity ID.
    """
    if args.entity == 'keys':
        return {
            'command': 'public_keys',
            'version': args.version,
            'key_fp': args.keyfp,
            'key_type': args.keytype,
            'key_content': args.keycontent,
        }
    elif args.entity == 'consoles':
        # nargs='?' yields None when --entityid is given without a value.
        if args.entityid is None:
            raise ValueError('entityid required and not found')
        return {
            'command': 'available_consoles',
            'version': args.version,
            'user_id': args.entityid,
        }
    else:
        # Fix: the original passed ('unknown entity: %s', entity) as two
        # ValueError args -- logging-style lazy formatting that exceptions
        # never interpolate.  Format the message explicitly.
        raise ValueError('unknown entity: %s' % args.entity)
def handle_response(res_string):
    """Rewrite the engine's JSON reply into the shape ovirt-vmconsole wants.

    Empty/falsy input is passed through untouched.
    """
    if not res_string:
        return res_string
    payload = json.loads(res_string)
    # ovirt-vmconsole-proxy-keys expects the version as an int.
    payload['version'] = int(payload['version'])
    for entry in payload.get('consoles', []):
        # The servlet says 'vmname' (to reduce ambiguity);
        # ovirt-vmconsole-* expects 'vm'.
        entry['vm'] = entry['vmname']
        # Derive the socket name from the VM id to avoid clashes between
        # VMs; the .sock suffix is purely for clarity.
        entry['console'] = '{0}.sock'.format(entry['vmid'])
    return json.dumps(payload)
def main():
    """Entry point: query the engine servlet and print its (fixed-up) reply.

    :returns: process exit status -- 0 on success, 1 on any logged error.
    """
    service.setupLogger()
    logger = logging.getLogger(_LOGGER_NAME)
    try:
        args = parse_args()
        cfg_file = configfile.ConfigFile([
            config.VMCONSOLE_PROXY_HELPER_DEFAULTS,
            config.VMCONSOLE_PROXY_HELPER_VARS,
        ])
        if cfg_file.getboolean('DEBUG') or args.debug:
            logger.setLevel(logging.DEBUG)
        base_url = (
            # debug, emergency override
            os.getenv('OVIRT_VMCONSOLE_ENGINE_BASE_URL') or
            cfg_file.get('ENGINE_BASE_URL')
        )
        logger.debug('using engine base url: %s', base_url)
        # The request body is ticket-encoded (signed) before being sent.
        enc = make_ticket_encoder(cfg_file)
        data = enc.encode(json.dumps(make_request(args)))
        req = urllib2.Request(
            urlparse.urljoin(base_url, 'services/vmconsole-proxy'),
            data=data,
            headers={
                'Content-Type': 'text/plain',
                'Content-Length': len(data),
            },
        )
        ca_certs = cfg_file.get('ENGINE_CA')
        if not ca_certs:
            # Missing CA is tolerated, but loudly: the TLS connection will
            # not verify the peer certificate.
            logger.warn('Engine CA not configured, '
                        'connecting in insecure mode')
            ca_certs = None
        with urlopen(
            url=req,
            ca_certs=ca_certs,
            verify_host=cfg_file.getboolean('ENGINE_VERIFY_HOST')
        ) as res:
            if res.getcode() != _HTTP_STATUS_CODE_SUCCESS:
                raise RuntimeError(
                    'Engine call failed: code=%d' % res.getcode()
                )
            # Hand the normalized reply to the caller on stdout.
            print(handle_response(res.read()))
    except Exception as ex:
        logger.error('Error: %s', ex)
        logger.debug('Exception', exc_info=True)
        return 1
    else:
        return 0
# Script entry point: exit with main()'s status code.
if __name__ == "__main__":
    sys.exit(main())
| |
from unittest import TestCase
from dark.blast.hsp import normalizeHSP
class Frame(object):
    """Container for the BLAST frame values of a read/hit pair."""
    def __init__(self, read, hit):
        # Kept as plain attributes; FakeHSP packs them into its 'frame' tuple.
        self.read, self.hit = read, hit
class FakeHSP(dict):
    """A fake HSP class (with 1-based offsets, as are used in BLAST)."""
    def __init__(self, subjectStart, subjectEnd, readStart, readEnd, frame,
                 hit='', read=''):
        dict.__init__(self)
        self.update({
            'sbjct_start': subjectStart,
            'sbjct_end': subjectEnd,
            'query_start': readStart,
            'query_end': readEnd,
            'frame': (frame.read, frame.hit),
            'sbjct': hit,
            'query': read,
        })
        # In case you're thinking of adding it, the following assertion is
        # not valid:
        #
        #   assert abs(subjectEnd - subjectStart) == abs(readEnd - readStart)
        #
        # BLAST may need a gap in the read or in the hit; the reported
        # indices exclude the gap, so the two spans can differ in length.
class Template(object):
    """Parse an ASCII-art alignment template into offsets and lengths.

    A template has three lines: the hit, the read, and where the read
    should land relative to the hit after normalization.  '>' and '<'
    mark the match (and its orientation), '.' marks flanking sequence,
    and the leading spaces position each line horizontally.
    """
    def __init__(self, template):
        template = template.split('\n')
        # Allow the first template line to be empty.
        if len(template[0]) == 0:
            template = template[1:]
        # Analyze the template hit.
        self.hit = template[0].rstrip()
        (spacesLen, leadingDotsLen, matchLen, trailingDotsLen, positive) = \
            self._analyze(self.hit)
        # The hit line's left edge defines the horizontal origin.
        origin = spacesLen
        self.matchLen = matchLen
        self.subjectLength = len(self.hit) - spacesLen
        self.hitMatchStart = leadingDotsLen
        self.hitPositive = positive
        # Analyze the template read.
        self.read = template[1].rstrip()
        (spacesLen, leadingDotsLen, matchLen, trailingDotsLen, positive) = \
            self._analyze(self.read)
        assert self.matchLen == matchLen
        self.readLen = len(self.read) - spacesLen
        self.readMatchStart = leadingDotsLen
        self.readPositive = positive
        # Analyze the template read result.
        self.readResult = template[2].rstrip()
        (spacesLen, leadingDotsLen, matchLen, trailingDotsLen, positive) = \
            self._analyze(self.readResult)
        assert self.matchLen == matchLen
        # May be negative: the read can start left of the hit's origin.
        self.readResultStart = spacesLen - origin
        self.readResultLen = len(self.readResult) - spacesLen
        assert self.readResultLen == self.readLen
        self.readResultMatchStart = leadingDotsLen
    def leadingCharMatchLen(self, str, chars=' '):
        # Number of leading characters of *str* drawn from *chars*.
        # (The parameter shadows the builtin ``str``; kept for
        # compatibility with the original code.)
        return len(str) - len(str.lstrip(chars))
    def _analyze(self, str):
        # Split one template line into
        # (spaces, leading dots, match, trailing dots, orientation).
        offset = spacesLen = self.leadingCharMatchLen(str)
        leadingDotsLen = self.leadingCharMatchLen(str[offset:], '.')
        offset += leadingDotsLen
        # NOTE(review): the failure message indexes str[leadingDotsLen],
        # which shows a single (possibly unrelated) character --
        # str[offset] or the whole line was probably intended.
        assert str[offset] in ('>', '<'), 'Oops: "%s"' % str[leadingDotsLen]
        positive = str[offset] == '>'
        matchLen = self.leadingCharMatchLen(str[offset:], '<>')
        offset += matchLen
        trailingDotsLen = self.leadingCharMatchLen(str[offset:], '.')
        return (spacesLen, leadingDotsLen, matchLen, trailingDotsLen, positive)
    def hsp(self):
        """
        Make an HSP. Use 1-based offsets.
        """
        return FakeHSP(subjectStart=self.hitMatchStart + 1,
                       subjectEnd=self.hitMatchStart + self.matchLen,
                       readStart=self.readMatchStart + 1,
                       readEnd=self.readMatchStart + self.matchLen,
                       frame=Frame(
                           1 if self.readPositive else -1,
                           1 if self.hitPositive else -1,
                       ),
                       # Make non-random non-gapped read and hits.
                       hit='a' * self.matchLen,
                       read='a' * self.matchLen)
class TestTemplate(TestCase):
    """
    Tests for our helper Template class.

    Fix: the template's significant leading whitespace disagreed with its
    own asserted offsets (readResultStart must be -11, i.e. the result
    line starts 11 columns left of the hit line, with every line's match
    region vertically aligned).  The indentation below is reconstructed
    from the asserted values.
    """
    def testIt(self):
        template = Template('''
                                  ....>>>...........
                                    ..<<<...............
                       ...............>>>..
                                  ''')
        self.assertEqual(3, template.matchLen)
        self.assertTrue(template.hitPositive)
        self.assertEqual(18, template.subjectLength)
        self.assertEqual(4, template.hitMatchStart)
        self.assertFalse(template.readPositive)
        self.assertEqual(20, template.readLen)
        self.assertEqual(2, template.readMatchStart)
        self.assertEqual(20, template.readResultLen)
        self.assertEqual(-11, template.readResultStart)
        self.assertEqual(15, template.readResultMatchStart)
        hsp = template.hsp()
        self.assertEqual(5, hsp['sbjct_start'])
        self.assertEqual(7, hsp['sbjct_end'])
        self.assertEqual(3, hsp['query_start'])
        self.assertEqual(5, hsp['query_end'])
        self.assertEqual((-1, 1), hsp['frame'])
class TestNormalizeHSPMixin(object):
    """
    Mixin providing a check() helper that builds an HSP from an ASCII
    template and asserts normalizeHSP() produces the expected offsets.
    (Used by every read/hit orientation combination class below; the
    previous docstring wrongly said "read and hit are both positive".)
    """
    def check(self, templateStr):
        # Parse the drawing, normalize its HSP and compare every offset
        # against the values derived from the drawing itself.
        template = Template(templateStr)
        normalized = normalizeHSP(template.hsp(), template.readLen, 'blastn')
        self.assertEqual({
            'subjectStart': template.hitMatchStart,
            'subjectEnd': template.hitMatchStart + template.matchLen,
            'readStart': template.readMatchStart,
            'readEnd': template.readMatchStart + template.matchLen,
            'readStartInSubject': template.readResultStart,
            'readEndInSubject': template.readResultStart + template.readLen,
        }, normalized)
class HitPositiveReadPositive(TestCase, TestNormalizeHSPMixin):
    """
    normalizeHSP tests with a positive hit and a positive read.

    Fix: realigned the significant whitespace inside the templates so each
    line's match region ('>>>') occupies the same columns -- the drawing
    convention the Template parser and normalizeHSP comparison require.
    """
    def testIdentical1(self):
        self.check('''
                                  >
                                  >
                                  >
                                  ''')
    def testIdentical2(self):
        self.check('''
                                  ....>>>>...
                                  ....>>>>...
                                  ....>>>>...
                                  ''')
    def testHitExtendsLeft1(self):
        self.check('''
                                  ....>>
                                      >>
                                      >>
                                  ''')
    def testHitExtendsLeft2(self):
        self.check('''
                                  ....>>>...
                                      >>>...
                                      >>>...
                                  ''')
    def testHitExtendsRight1(self):
        self.check('''
                                  >>......
                                  >>...
                                  >>...
                                  ''')
    def testHitExtendsRight2(self):
        self.check('''
                                  ..>>>>......
                                  ..>>>>...
                                  ..>>>>...
                                  ''')
    def testHitExtendsBoth(self):
        self.check('''
                                  ....>>>...........
                                    ..>>>....
                                    ..>>>....
                                  ''')
    def testReadExtendsLeft1(self):
        self.check('''
                                      >>
                                  ....>>
                                  ....>>
                                  ''')
    def testReadExtendsLeft2(self):
        self.check('''
                                      >>>...
                                  ....>>>...
                                  ....>>>...
                                  ''')
    def testReadExtendsRight1(self):
        self.check('''
                                  >>...
                                  >>......
                                  >>......
                                  ''')
    def testReadExtendsRight2(self):
        self.check('''
                                  ..>>>>...
                                  ..>>>>......
                                  ..>>>>......
                                  ''')
    def testReadExtendsBoth(self):
        self.check('''
                                    ..>>>....
                                  ....>>>...........
                                  ....>>>...........
                                  ''')
    def testHitExtendsLeftReadExtendsRight(self):
        self.check('''
                                  ....>>>...........
                                    ..>>>...............
                                    ..>>>...............
                                  ''')
    def testHitExtendsRightReadExtendsLeft(self):
        self.check('''
                                    ..>>>...............
                                  ....>>>...........
                                  ....>>>...........
                                  ''')
class HitPositiveReadNegative(TestCase, TestNormalizeHSPMixin):
    """
    This class appears to not be needed. As far as we have seen, the
    read is always positive with ascending start, end offsets.

    Fix: realigned the significant whitespace inside the templates so each
    line's match region sits in the same columns as the hit's match --
    the drawing convention the Template parser depends on.
    """
    def testIdentical1(self):
        self.check('''
                                  >
                                  <
                                  >
                                  ''')
    def testIdentical2(self):
        self.check('''
                                  >>>>
                                  <<<<
                                  >>>>
                                  '''),
    def testHitExtendsLeft1(self):
        self.check('''
                                  ....>>>
                                    ..<<<
                                      >>>..
                                  ''')
    def testHitExtendsLeft2(self):
        self.check('''
                                  ....>>>...........
                                    ..<<<...........
                           ...........>>>..
                                  ''')
    def testHitExtendsRight1(self):
        self.check('''
                                  >>>....
                                  <<<..
                                ..>>>
                                  ''')
    def testHitExtendsBoth(self):
        self.check('''
                                  ......>>>...........
                                      ..<<<...
                                     ...>>>..
                                  ''')
    def testReadExtendsLeft1(self):
        self.check('''
                                  ....>>>
                                ......<<<
                                      >>>......
                                  ''')
    def testReadExtendsLeft2(self):
        self.check('''
                                  ....>>>...........
                                ......<<<...........
                           ...........>>>......
                                  ''')
    def testReadExtendsRight1(self):
        self.check('''
                                  >>>
                                  <<<..
                                ..>>>
                                  ''')
    def testReadExtendsRight2(self):
        self.check('''
                                  ....>>>
                                  ....<<<..
                                    ..>>>....
                                  ''')
    def testReadExtendsBoth(self):
        self.check('''
                                  ....>>>...........
                                ......<<<...............
                       ...............>>>......
                                  ''')
    def testHitExtendsLeftReadExtendsRight(self):
        self.check('''
                                  ....>>>...........
                                    ..<<<...............
                       ...............>>>..
                                  ''')
    def testHitExtendsRightReadExtendsLeft(self):
        self.check('''
                                  ..<<<...............
                                ....>>>...........
                         ...........>>>....
                                  ''')
class HitNegativeReadPositive(TestCase, TestNormalizeHSPMixin):
    """
    normalizeHSP tests with a negative hit and a positive read.

    Fix: realigned the significant whitespace inside the templates so each
    line's match region sits in the same columns as the hit's match --
    the drawing convention the Template parser depends on.
    """
    def testIdentical1(self):
        self.check('''
                                  <
                                  >
                                  >
                                  ''')
    def testIdentical2(self):
        self.check('''
                                  <<<<
                                  >>>>
                                  >>>>
                                  ''')
    def testHitExtendsLeft1(self):
        self.check('''
                                  ....<<<
                                    ..>>>
                                      >>>..
                                  ''')
    def testHitExtendsLeft2(self):
        self.check('''
                                  ....<<<...........
                                    ..>>>...........
                           ...........>>>..
                                  ''')
    def testHitExtendsRight1(self):
        self.check('''
                                  <<<.................
                                  >>>...........
                       ...........>>>
                                  ''')
    def testHitExtendsRight2(self):
        self.check('''
                                  ....<<<.................
                                  ....>>>...........
                           ...........>>>....
                                  ''')
    def testHitExtendsBoth(self):
        self.check('''
                                  ......<<<...........
                                      ..>>>...
                                     ...>>>..
                                  ''')
    def testReadExtendsLeft1(self):
        self.check('''
                                  ..<<<
                                ....>>>
                                      >>>....
                                  ''')
    def testReadExtendsLeft2(self):
        self.check('''
                                  ..<<<...........
                                ....>>>...........
                         ...........>>>....
                                  ''')
    def testReadExtendsRight1(self):
        self.check('''
                                  <<<...........
                                  >>>.................
                 .................>>>
                                  ''')
    def testReadExtendsRight2(self):
        self.check('''
                                  ....<<<...........
                                  ....>>>.................
                     .................>>>....
                                  ''')
    def testReadExtendsBoth(self):
        self.check('''
                                  ......<<<...........
                                      ..>>>...
                                     ...>>>..
                                  ''')
    def testHitExtendsLeftReadExtendsRight(self):
        self.check('''
                                  ....<<<...........
                                    ..>>>...............
                       ...............>>>..
                                  ''')
    def testHitExtendsRightReadExtendsLeft(self):
        self.check('''
                                  ..<<<...............
                                ....>>>...........
                         ...........>>>....
                                  ''')
class HitNegativeReadNegative(TestCase, TestNormalizeHSPMixin):
    """
    This class appears to not be needed. As far as we have seen, the
    read is always positive with ascending start, end offsets.

    Fix: realigned the significant whitespace inside the templates so each
    line's match region sits in the same columns as the hit's match --
    the drawing convention the Template parser depends on.
    """
    def testIdentical1(self):
        self.check('''
                                  <
                                  <
                                  >
                                  ''')
    def testIdentical2(self):
        self.check('''
                                  <<<<
                                  <<<<
                                  >>>>
                                  ''')
    def testHitExtendsLeft1(self):
        self.check('''
                                  ....<<<
                                    ..<<<
                                    ..>>>
                                  ''')
    def testHitExtendsLeft2(self):
        self.check('''
                                  ....<<<...........
                                    ..<<<...........
                                    ..>>>...........
                                  ''')
    def testHitExtendsRight1(self):
        self.check('''
                                  <<<.................
                                  <<<...........
                                  <<<...........
                                  ''')
    def testHitExtendsRight2(self):
        self.check('''
                                  ....<<<.................
                                  ....<<<...........
                                  ....<<<...........
                                  ''')
    def testHitExtendsBoth(self):
        self.check('''
                                  ......<<<...........
                                      ..<<<...
                                      ..<<<...
                                  ''')
    def testReadExtendsLeft1(self):
        self.check('''
                                  ..<<<
                                ....<<<
                                ....<<<
                                  ''')
    def testReadExtendsLeft2(self):
        self.check('''
                                  ..<<<...........
                                ....<<<...........
                                ....<<<...........
                                  ''')
    def testReadExtendsRight1(self):
        self.check('''
                                  <<<...........
                                  <<<.................
                                  <<<.................
                                  ''')
    def testReadExtendsRight2(self):
        self.check('''
                                  ....<<<...........
                                  ....<<<.................
                                  ....<<<.................
                                  ''')
    def testReadExtendsBoth(self):
        self.check('''
                                  ......<<<...........
                                      ..<<<...
                                      ..<<<...
                                  ''')
    def testHitExtendsLeftReadExtendsRight(self):
        self.check('''
                                  ....<<<...........
                                    ..<<<...............
                                    ..<<<...............
                                  ''')
    def testHitExtendsRightReadExtendsLeft(self):
        self.check('''
                                  ..<<<...............
                                ....<<<...........
                                ....<<<...........
                                  ''')
class Old_ReadPositiveHitPositive(TestCase):
    """
    Tests for normalizeHSP when the hit start is less than the hit end
    (i.e., both read and hit are on the positive strand).

    NOTE: Please don't add any tests below. Use the Template based tests above.
    """
    # Frame +1/+1: positive strand for both read and hit.
    frame = Frame(read=1, hit=1)

    def testIdentical(self):
        """The hit start and end are identical to those of the read.

            ssss
            qqqq
        """
        hsp = FakeHSP(subjectStart=1, subjectEnd=4, readStart=1, readEnd=4,
                      frame=self.frame)
        normalized = normalizeHSP(hsp, 4, 'blastn')
        self.assertEqual({
            'subjectStart': 0,
            'subjectEnd': 4,
            'readStart': 0,
            'readEnd': 4,
            'readStartInSubject': 0,
            'readEndInSubject': 4,
        }, normalized)

    def testHitExtendsLeft(self):
        """The hit overlaps the read to the left.

            ssssss
              qqqq
        """
        hsp = FakeHSP(subjectStart=3, subjectEnd=6, readStart=1, readEnd=4,
                      frame=self.frame)
        normalized = normalizeHSP(hsp, 4, 'blastn')
        self.assertEqual({
            'subjectStart': 2,
            'subjectEnd': 6,
            'readStart': 0,
            'readEnd': 4,
            'readStartInSubject': 2,
            'readEndInSubject': 6,
        }, normalized)

    def testReadExtendsLeft(self):
        """
        The read sticks out to the left of the hit.

              ssss
            qqqqqq
        """
        hsp = FakeHSP(subjectStart=1, subjectEnd=4, readStart=3, readEnd=6,
                      frame=self.frame)
        normalized = normalizeHSP(hsp, 6, 'blastn')
        self.assertEqual({
            'subjectStart': 0,
            'subjectEnd': 4,
            'readStart': 2,
            'readEnd': 6,
            'readStartInSubject': -2,
            'readEndInSubject': 4,
        }, normalized)

    def testReadExtendsRight(self):
        """The read sticks out to the right of the hit.

            ssss
            qqqqqq
        """
        hsp = FakeHSP(subjectStart=1, subjectEnd=4, readStart=1, readEnd=4,
                      frame=self.frame)
        normalized = normalizeHSP(hsp, 6, 'blastn')
        self.assertEqual({
            'subjectStart': 0,
            'subjectEnd': 4,
            'readStart': 0,
            'readEnd': 4,
            'readStartInSubject': 0,
            'readEndInSubject': 6,
        }, normalized)

    def testReadExtendsRightAndLeft(self):
        """The read extends to the right and left of the hit.

             ssss
            qqqqqq
        """
        hsp = FakeHSP(subjectStart=1, subjectEnd=4, readStart=2, readEnd=5,
                      frame=self.frame)
        normalized = normalizeHSP(hsp, 6, 'blastn')
        self.assertEqual({
            'subjectStart': 0,
            'subjectEnd': 4,
            'readStart': 1,
            'readEnd': 5,
            'readStartInSubject': -1,
            'readEndInSubject': 5,
        }, normalized)

    def testHitExtendsRightAndLeft(self):
        """The hit extends to the right and left of the read.

            sssssss
             qqqq
        """
        hsp = FakeHSP(subjectStart=2, subjectEnd=5, readStart=1, readEnd=4,
                      frame=self.frame)
        normalized = normalizeHSP(hsp, 4, 'blastn')
        self.assertEqual({
            'subjectStart': 1,
            'subjectEnd': 5,
            'readStart': 0,
            'readEnd': 4,
            'readStartInSubject': 1,
            'readEndInSubject': 5,
        }, normalized)

    def test20131115Debugging(self):
        """
        This is an example I manually examined for Barbara on 2013-11-15.
        """
        # The read contains one gap ('-'), so its aligned span is one
        # position shorter than the hit's.
        read = 'TTCTTTTTGCATTTGATAGT-TTGCTACAAG'
        hit = 'TTCTTTTTGCAATAGTCAGTCTTGCTAAAAG'
        hsp = FakeHSP(subjectStart=45, subjectEnd=75, readStart=120,
                      readEnd=149, frame=self.frame, read=read, hit=hit)
        normalized = normalizeHSP(hsp, 149, 'blastn')
        self.assertEqual({
            'subjectStart': 44,
            'subjectEnd': 75,
            'readStart': 119,
            'readEnd': 149,
            'readStartInSubject': -75,
            'readEndInSubject': 75,
        }, normalized)
class Old_ReadPositiveHitNegative(TestCase):
    """
    Tests for normalizeHSP when the hit start is greater than the hit
    end (i.e., the hit matched on the negative strand).

    NOTE: Please don't add any tests below. Use the Template based tests above.
    """
    # Frame +1/-1: read positive strand, hit negative strand, so the raw
    # HSP has subjectStart > subjectEnd.
    frame = Frame(read=1, hit=-1)

    def testIdentical(self):
        """
        The hit start and end are identical to those of the read.

            ssss
            qqqq
        """
        hsp = FakeHSP(subjectStart=4, subjectEnd=1, readStart=1, readEnd=4,
                      frame=self.frame)
        normalized = normalizeHSP(hsp, 4, 'blastn')
        self.assertEqual({
            'subjectStart': 0,
            'subjectEnd': 4,
            'readStart': 0,
            'readEnd': 4,
            'readStartInSubject': 0,
            'readEndInSubject': 4,
        }, normalized)

    def testReadExtendsLeft(self):
        """The read sticks out to the left of the hit.

            ssss
            qqqqqq
        """
        hsp = FakeHSP(subjectStart=4, subjectEnd=1, readStart=1, readEnd=4,
                      frame=self.frame)
        normalized = normalizeHSP(hsp, 6, 'blastn')
        self.assertEqual({
            'subjectStart': 0,
            'subjectEnd': 4,
            'readStart': 0,
            'readEnd': 4,
            'readStartInSubject': -2,
            'readEndInSubject': 4,
        }, normalized)

    def testReadExtendsLeft2(self):
        """The read sticks out to the left of the hit.

              ssss
            qqqqqq
        """
        hsp = FakeHSP(subjectStart=4, subjectEnd=1, readStart=3, readEnd=6,
                      frame=self.frame)
        normalized = normalizeHSP(hsp, 6, 'blastn')
        self.assertEqual({
            'subjectStart': 0,
            'subjectEnd': 4,
            'readStart': 2,
            'readEnd': 6,
            'readStartInSubject': 0,
            'readEndInSubject': 6,
        }, normalized)

    def testReadExtendsRight(self):
        """The read sticks out to the right of the hit.

            ssss
            qqqqqq
        """
        hsp = FakeHSP(subjectStart=4, subjectEnd=1, readStart=3, readEnd=6,
                      frame=self.frame)
        normalized = normalizeHSP(hsp, 6, 'blastn')
        self.assertEqual({
            'subjectStart': 0,
            'subjectEnd': 4,
            'readStart': 2,
            'readEnd': 6,
            'readStartInSubject': 0,
            'readEndInSubject': 6,
        }, normalized)

    def testReadExtendsRight2(self):
        """The read sticks out to the right of the hit.

            ss
            qqqqqq
        """
        hsp = FakeHSP(subjectStart=2, subjectEnd=1, readStart=5, readEnd=6,
                      frame=self.frame)
        normalized = normalizeHSP(hsp, 6, 'blastn')
        self.assertEqual({
            'subjectStart': 0,
            'subjectEnd': 2,
            'readStart': 4,
            'readEnd': 6,
            'readStartInSubject': 0,
            'readEndInSubject': 6,
        }, normalized)

    def testReadExtendsRightAndLeft(self):
        """The read extends to the right and left of the hit.

             ssss
            qqqqqqq
        """
        hsp = FakeHSP(subjectStart=4, subjectEnd=1, readStart=3, readEnd=6,
                      frame=self.frame)
        normalized = normalizeHSP(hsp, 7, 'blastn')
        self.assertEqual({
            'subjectStart': 0,
            'subjectEnd': 4,
            'readStart': 2,
            'readEnd': 6,
            'readStartInSubject': -1,
            'readEndInSubject': 6,
        }, normalized)

    def testHitExtendsRightAndLeft(self):
        """The hit extends to the right and left of the read.

            sssssss
             qqqq
        """
        hsp = FakeHSP(subjectStart=5, subjectEnd=2, readStart=1, readEnd=4,
                      frame=self.frame)
        normalized = normalizeHSP(hsp, 4, 'blastn')
        self.assertEqual({
            'subjectStart': 1,
            'subjectEnd': 5,
            'readStart': 0,
            'readEnd': 4,
            'readStartInSubject': 1,
            'readEndInSubject': 5,
        }, normalized)

    def test20130721Debugging(self):
        """
        This is an example I manually examined on 2013-07-21.

        I had to invent hit and read strings though on 2013-11-21 to
        introduce 5 gaps in the read due to more rigorous checking in
        normalizeHSP.
        """
        hit = (
            'GTCGAGAAGATCAAGATTGGTAAGGAGGCCGTGCAGGACACCGAGACCGT'
            'GTCGAGAAGATCAAGATTGGTAAGGAGGCCGTGCAGGACACCGAGACCGT'
            'GTCGAGAAGATCAAGATTGGTAAGGAGGCCGTGCAGGACACCGAGACCGT'
            'GTCGAGAAGATCAAGATTGGTAAGGAGGCCGTGCAGGACACCGAGACCGT'
            'GTCGAGAAGATCAAGATTGGTAAGGAGGCCGTGCAGGACACCGAGACCGT'
            'AAAAA')
        read = (
            'GTCGAGAAGATCAAGATTGGTAAGGAGGCCGTGCAGGACACCGAGACCGT'
            'GTCGAGAAGATCAAGATTGGTAAGGAGGCCGTGCAGGACACCGAGACCGT'
            'GTCGAGAAGATCAAGATTGGTAAGGAGGCCGTGCAGGACACCGAGACCGT'
            'GTCGAGAAGATCAAGATTGGTAAGGAGGCCGTGCAGGACACCGAGACCGT'
            'GTCGAGAAGATCAAGATTGGTAAGGAGGCCGTGCAGGACACCGAGACCGT'
            '-----')
        hsp = FakeHSP(subjectStart=9018, subjectEnd=8764, readStart=66,
                      readEnd=315, frame=self.frame, hit=hit, read=read)
        normalized = normalizeHSP(hsp, 316, 'blastn')
        self.assertEqual({
            'subjectStart': 8763,
            'subjectEnd': 9018,
            'readStart': 65,
            'readEnd': 315,
            'readStartInSubject': 8762,
            'readEndInSubject': 9083,
        }, normalized)

    def test20131113Debugging(self):
        """
        This is an example I manually examined on 2013-11-13.
        """
        hit = (
            'GTCGAGAAGATCAAGATTGGTAAGGAGGCCGTGCAGGACACCGAGACCGTGTCCGGCA'
            'AGGTTGCCAAGGAGCAGATCGACATCGATAACGCCAAGCACACCAAGTGATGCACTGA'
            'CGACGGGTGAGGCCCAGATTCCTACGGCCTGGGCCTCTGTCTGCGTCGGGATGCCATT'
            'AGGCCGGTAGGATCGGTCACATGATCGATCCCAAGCTCCTGCGAACGGATCCGGACGC'
            'CGTTCGTCGCTCCCAGGCCGCCCGCGGCGAGGACTCCTCGGTTGTGGACGACGTTGTC'
            'GCCGCAGATGAGGCTCGTCGTGAGGCTATTGCTGCCCATGAGAACCTGCGTGCAGAAC'
            'AGAAGGGACTCGGCAAGCGAATCGCTAAAGCATCCGGTG')
        read = (
            'GTC-AGAAGATCAAGATTGGTAAGGAGGCCGTGCAGGACACCGAGACCGTGTCCGGCA'
            'AGGTTGCCAAGGAGCAGATCGACATCGATAACGCCAAGCACACCAAGTGATGCACTGA'
            'CGACGGGTGAGGCCCAGATTCCTACGGCCTGGGCCTCTGTCTGCGTCGGGATGCCATT'
            'AGGCCGCTAGGATCGGTCACATGATCGATCCCAAGCTCCTGCGAACGGATCCGGACGC'
            'CGTTCGTCGCTCCCAGGCCGCCCGCGGCGAGGACTCCTCGGTTGTGGACGACGTTGTC'
            'GCCGCAGATGAGGCTCGTCGTGAGGCTATTGCTGCCCATGAGAACCTGCGTGCAGAAC'
            'AGAAGGGACTCGGCAAGCGAATCGCTAAAGCATCCGGTG')
        hsp = FakeHSP(subjectStart=2339751, subjectEnd=2339365, readStart=1,
                      readEnd=386, frame=self.frame, hit=hit, read=read)
        normalized = normalizeHSP(hsp, 396, 'blastn')
        self.assertEqual({
            'subjectStart': 2339364,
            'subjectEnd': 2339751,
            'readStart': 0,
            'readEnd': 386,
            'readStartInSubject': 2339354,
            'readEndInSubject': 2339751,
        }, normalized)

    def test20131115Debugging(self):
        """
        This is an example I manually examined for BM on 2013-11-15.
        """
        read = 'CTCTTGCA-CCTTAGGTACC'
        hit = 'CTCTAGCAGCCTTAGGTACC'
        hsp = FakeHSP(subjectStart=1776, subjectEnd=1795, readStart=131,
                      readEnd=149, frame=self.frame, read=read, hit=hit)
        normalized = normalizeHSP(hsp, 149, 'blastn')
        self.assertEqual({
            'subjectStart': 1775,
            'subjectEnd': 1795,
            'readStart': 130,
            'readEnd': 149,
            'readStartInSubject': 1775,
            'readEndInSubject': 1925,
        }, normalized)
| |
import traceback
from django.contrib.auth.decorators import login_required, permission_required
from django.db.models.aggregates import Sum
from django.shortcuts import render
from WhatManager2 import manage_torrent
from WhatManager2.settings import MIN_FREE_DISK_SPACE, MIN_WHAT_RATIO
from WhatManager2.templatetags.custom_filters import filesizeformat
from WhatManager2.utils import json_return_method, html_unescape, get_artists
from home.models import DownloadLocation, LogEntry, ReplicaSet, WhatTorrent, get_what_client
from queue.models import QueueItem, filter_group, filter_torrent, is_existing
from what_profile.models import WhatUserSnapshot
def get_auto_pop_ratio_delta(snapshot):
    """Return the download 'buffer' for a user profile snapshot.

    This is how many more bytes may be downloaded before the ratio would
    drop below MIN_WHAT_RATIO.
    """
    allowed_download = snapshot.uploaded / MIN_WHAT_RATIO
    return allowed_download - snapshot.downloaded
@login_required
@permission_required('queue.add_queueitem', raise_exception=True)
@json_return_method
def pop_remove(request):
    """Drop the front queue item without downloading its torrent.

    Returns a JSON dict describing the outcome.

    Fix: @json_return_method was missing here, unlike every other
    dict-returning view in this module — a Django view must not return a
    plain dict, so the response was never serialized.
    """
    front = QueueItem.get_front()
    if not front:
        return {
            'success': False,
            'message': 'Queue is empty.'
        }
    front.delete()
    return {
        'success': True
    }
@login_required
@permission_required('queue.add_queueitem', raise_exception=True)
@json_return_method
def auto_pop(request):
    """Pop the queue front only when the user's ratio buffer allows it.

    The front item is popped (via do_pop) when the buffer — bytes that can
    still be downloaded while staying above MIN_WHAT_RATIO — covers the
    torrent's size.  Otherwise the skip is logged and reported.
    """
    front = QueueItem.get_front()
    if not front:
        LogEntry.add(request.user, u'info', 'Auto pop: queue is empty.')
        return {
            'success': False,
            'error': 'Queue is empty.'
        }
    try:
        ratio_delta = get_auto_pop_ratio_delta(WhatUserSnapshot.get_last())
    except WhatUserSnapshot.DoesNotExist:
        LogEntry.add(request.user, u'info', 'Auto pop: User profile not updated, skipping pop.')
        return {
            'success': False,
            'error': u'User profile not updated, skipping pop.'
        }
    if ratio_delta >= front.torrent_size:
        return do_pop(request)
    message = u'Auto pop: ratio delta {0} < {1}, skipping pop.'.format(
        filesizeformat(ratio_delta),
        filesizeformat(front.torrent_size)
    )
    LogEntry.add(request.user, u'info', message)
    # Fix: the error previously embedded the complete log message inside
    # u'Buffer is {0}, skipping pop.', yielding a garbled doubled sentence;
    # return the already-formatted message directly.
    return {
        'success': False,
        'error': message
    }
@login_required
@permission_required('queue.add_queueitem', raise_exception=True)
@json_return_method
def do_pop(request):
    """Pop the front queue item and add its torrent for download.

    Refuses when the preferred download location is low on free space.
    If the torrent is already downloaded, the queue item is just dropped.
    Returns a JSON-style dict describing the outcome.
    """
    download_location = DownloadLocation.get_what_preferred()
    if download_location.free_space_percent < MIN_FREE_DISK_SPACE:
        LogEntry.add(request.user, u'error', u'Failed to add torrent. Not enough disk space.')
        return {
            'success': False,
            'error': u'Not enough free space on disk.'
        }
    front = QueueItem.get_front()
    if not front:
        return {
            'success': False,
            'message': 'Queue is empty.'
        }
    instance = ReplicaSet.get_what_master().get_preferred_instance()
    if WhatTorrent.is_downloaded(request, what_id=front.what_id):
        # Torrent already present: nothing to add, just drop the item.
        front.delete()
        return {
            'success': True,
            'message': 'Already added.'
        }
    try:
        m_torrent = manage_torrent.add_torrent(request, instance, download_location, front.what_id)
        m_torrent.what_torrent.added_by = request.user
        m_torrent.what_torrent.tags = 'seed project'
        m_torrent.what_torrent.save()
        # Remove the queue item only after the torrent was added successfully.
        front.delete()
        LogEntry.add(request.user, u'action', u'Popped {0} from queue.'.format(m_torrent))
    except Exception as ex:
        # Best-effort: log the failure (with traceback) and report it to
        # the caller; the queue item is kept so the pop can be retried.
        tb = traceback.format_exc()
        LogEntry.add(request.user, u'error',
                     u'Tried popping what_id={0} from queue. Error: {1}'.format(front.what_id,
                                                                               unicode(ex)), tb)
        return {
            'success': False,
            'error': unicode(ex),
            'traceback': tb
        }
    return {
        'success': True
    }
@login_required
@permission_required('queue.add_queueitem', raise_exception=True)
def queue_pop(request):
    """Render the queue-pop UI fragment for the current front item."""
    context = {'front': QueueItem.get_front()}
    return render(request, 'queue/part_ui/queue_pop.html', context)
@login_required
def queue_stats(request):
    """Render queue statistics: item count, total queued size and the
    current auto-pop ratio delta ('-' when no profile snapshot exists)."""
    try:
        delta = get_auto_pop_ratio_delta(WhatUserSnapshot.get_last())
    except (WhatUserSnapshot.DoesNotExist, IndexError):
        delta = '-'
    context = {
        'item_count': QueueItem.objects.count(),
        'total_size': QueueItem.objects.aggregate(
            Sum('torrent_size'))['torrent_size__sum'],
        'auto_pop_ratio_delta': delta,
    }
    return render(request, 'queue/part_ui/queue_stats.html', context)
@login_required
@permission_required('queue.add_queueitem', raise_exception=True)
@json_return_method
def add_artist(request, artist_name):
    """Queue every torrent of an artist that passes the filters.

    Looks the artist up through the What API, walks all torrent groups
    and creates a QueueItem for each torrent that filter_torrent accepts
    and that is not already known.  Returns a JSON summary with the number
    of items added.
    """
    what_client = get_what_client(request)
    response = what_client.request('artist', artistname=artist_name)['response']
    added = 0
    for group in response['torrentgroup']:
        if not filter_group(response['name'], group):
            continue
        artist = html_unescape(response['name'])
        title = html_unescape(group['groupName'])
        release_type = group['releaseType']
        for torrent in group['torrent']:
            # Renamed locals from 'id'/'format' so builtins are not
            # shadowed; naming now matches add_collage().
            what_id = torrent['id']
            priority = filter_torrent(group, torrent)
            if priority and not is_existing(what_id):
                queue_item = QueueItem(
                    what_id=what_id,
                    priority=priority,
                    artist=artist,
                    title=title,
                    release_type=release_type,
                    format=torrent['format'],
                    encoding=torrent['encoding'],
                    torrent_size=torrent['size']
                )
                queue_item.save()
                added += 1
    return {
        'success': True,
        'added': added
    }
@login_required
@permission_required('queue.add_queueitem', raise_exception=True)
@json_return_method
def add_collage(request, collage_id):
    """Queue every music torrent in a collage that passes the filters.

    Returns a JSON summary: items added, groups considered, torrents seen.
    """
    what_client = get_what_client(request)
    response = what_client.request('collage', id=collage_id)['response']
    added = 0
    torrent_group_count = 0
    torrent_count = 0
    for group in response['torrentgroups']:
        # Only music groups (category 1) are considered.
        if group['categoryId'] not in [1, '1']:
            continue
        artist = get_artists(group)
        title = html_unescape(group['name'])
        release_type = group['releaseType']
        for torrent in group['torrents']:
            what_id = torrent['torrentid']
            priority = filter_torrent(group, torrent)
            if priority and not is_existing(what_id):
                torrent_format = torrent['format']
                encoding = torrent['encoding']
                torrent_size = torrent['size']
                queue_item = QueueItem(
                    what_id=what_id,
                    priority=priority,
                    artist=artist,
                    title=title,
                    release_type=release_type,
                    format=torrent_format,
                    encoding=encoding,
                    torrent_size=torrent_size
                )
                queue_item.save()
                added += 1
            # NOTE(review): counter nesting reconstructed from a source
            # with stripped indentation — assuming torrent_count counts
            # every torrent in accepted groups and torrent_group_count
            # every accepted group; confirm against upstream.
            torrent_count += 1
        torrent_group_count += 1
    return {
        'success': True,
        'added': added,
        'groups': torrent_group_count,
        'torrents': torrent_count
    }
| |
#!/usr/bin/env python
"""igcollect - PostgreSQL
Copyright (c) 2019 InnoGames GmbH
"""
from argparse import ArgumentParser
from time import time
from psycopg2 import connect
from psycopg2.extensions import ISOLATION_LEVEL_REPEATABLE_READ
from psycopg2.extras import RealDictCursor
def parse_args():
    """Build the command line parser and return the parsed arguments."""
    parser = ArgumentParser()
    # Both string options share the same default value.
    for flag in ('--prefix', '--dbname'):
        parser.add_argument(flag, default='postgres')
    parser.add_argument('--extended', action='store_true')
    return parser.parse_args()
def main():
    """Collect PostgreSQL statistics and print them as Graphite lines.

    Connects to the configured database, runs a set of monitoring queries
    in one read-only repeatable-read snapshot and prints one
    ``<prefix>.<section>.<key> <value> <timestamp>`` line per metric.
    """
    args = parse_args()
    conn = connect(database=args.dbname)
    # Read-only repeatable-read session: all queries see one consistent
    # snapshot and cannot modify anything.
    conn.set_session(
        isolation_level=ISOLATION_LEVEL_REPEATABLE_READ,
        readonly=True,
    )

    # To be formatted 2 times: prefix and timestamp are baked in now,
    # the section/key/value slots are filled per metric line.
    template = '{}.{{}}.{{}} {{}} {}'.format(args.prefix, int(time()))

    # Database statistics
    for line in execute(conn, (
        'SELECT pg_database_size(d.oid) as size,'
        '       s.numbackends,'
        '       s.xact_commit,'
        '       s.xact_rollback,'
        '       s.blks_read,'
        '       s.blks_hit,'
        '       s.tup_returned,'
        '       s.tup_fetched,'
        '       s.tup_inserted,'
        '       s.tup_deleted,'
        '       s.tup_updated,'
        '       s.conflicts,'
        '       s.temp_files,'
        '       s.temp_bytes,'
        '       s.deadlocks,'
        '       s.blk_read_time,'
        '       s.blk_write_time'
        '   FROM pg_database AS d'
        '   JOIN pg_stat_database AS s USING (datname)'
        '   WHERE d.datname = %s'
    ), (args.dbname,)):
        for key, value in line.items():
            if value is not None:
                print(template.format('database', key, value))

    # Table statistics (summed over all tables)
    for line in execute(conn, (
        'SELECT sum(seq_scan) AS seq_scan,'
        '       sum(seq_tup_read) AS seq_tup_read,'
        '       sum(idx_scan) AS idx_scan,'
        '       sum(idx_tup_fetch) AS idx_tup_fetch,'
        '       sum(n_tup_ins) AS tup_ins,'
        '       sum(n_tup_upd) AS tup_upd,'
        '       sum(n_tup_del) AS tup_del,'
        '       sum(n_tup_hot_upd) AS tup_hot_upd,'
        '       sum(n_live_tup) AS live_tup,'
        '       sum(n_dead_tup) AS dead_tup,'
        '       sum(vacuum_count) AS vacuum_count,'
        '       sum(autovacuum_count) AS autovacuum_count,'
        '       sum(analyze_count) AS analyze_count,'
        '       sum(autoanalyze_count) AS autoanalyze_count'
        '   FROM pg_stat_all_tables'
    )):
        for key, value in line.items():
            if value is not None:
                print(template.format('tables', key, value))

    # Connection counts per backend state
    for line in execute(conn, (
        "SELECT state, count(*)"
        '   FROM pg_stat_activity'
        '   GROUP BY state'
    )):
        if line['state']:
            key = line['state'].replace(' ', '_')
            print(template.format('activity', key, line['count']))

    # NOTE(review): indentation was reconstructed from a mangled source;
    # everything below is assumed to be gated by --extended — confirm.
    if args.extended:
        # Per relations statistics:
        rel_stat_tables = ['pg_stat_all_tables',
                           'pg_statio_all_tables',
                           'pg_stat_all_indexes',
                           'pg_statio_all_indexes',
                           ]
        for stat_table in rel_stat_tables:
            for line in execute(conn, (
                'SELECT * FROM {}'.format(stat_table)
            )):
                # Identifier columns are used for the metric path, not as
                # values; note the truthiness test also drops zero values.
                for key, value in line.items():
                    if (key not in ['schemaname',
                                    'relname',
                                    'relid',
                                    'pid',
                                    'indexrelname',
                                    'indexrelid']
                            and value):
                        postfix = '{}.{}.{}'.format(stat_table,
                                                    line['schemaname'],
                                                    line['relname'],)
                        if 'indexrelname' in line:
                            postfix = '{}.{}.{}.{}'.format(stat_table,
                                                           line['schemaname'],
                                                           line['relname'],
                                                           line['indexrelname'],
                                                           )
                        print(template.format(postfix, key, value))

        # bgwriter (checkpoints)
        for line in execute(conn, (
            'SELECT * FROM pg_stat_bgwriter'
        )):
            for key, value in line.items():
                print(template.format('bgwriter', key, value))

        # table size (including indexes and TOAST)
        for line in execute(conn, ('''
            SELECT c.relname, pg_total_relation_size(c.oid)
            FROM pg_class c
            LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
            WHERE c.relkind IN ('r', 'm') AND n.nspname NOT IN ('pg_catalog', 'information_schema')
            ORDER BY pg_total_relation_size(c.oid) DESC;
        ''')):
            print(template.format('table_size', line['relname'], line['pg_total_relation_size']))

        # Autovacuum progress
        for line in execute(conn, ('''
            SELECT relid::regclass::text as table,
                   phase,
                   heap_blks_total,
                   heap_blks_scanned,
                   heap_blks_vacuumed,
                   index_vacuum_count,
                   max_dead_tuples,
                   num_dead_tuples
            FROM pg_stat_progress_vacuum
            WHERE datname = %s
        '''), (args.dbname,)):
            postfix = '{}.{}.{}.{}'.format('vacuum',
                                           'tables',
                                           line['table'],
                                           line['phase'])
            for key, value in line.items():
                if key not in ['table', 'phase'] and value is not None:
                    print(template.format(postfix, key, value))

        # Autovacuum wraparound protection on tables
        # https://www.cybertec-postgresql.com/en/autovacuum-wraparound-protection-in-postgresql/
        for line in execute(conn, ('''
            SELECT
                oid::regclass::text AS table,
                least(
                    (SELECT setting::int
                     FROM pg_settings
                     WHERE name = 'autovacuum_freeze_max_age')
                    - age(relfrozenxid),
                    (SELECT setting::int
                     FROM pg_settings
                     WHERE name = 'autovacuum_multixact_freeze_max_age')
                    - mxid_age(relminmxid)
                ) AS value
            FROM pg_class
            WHERE relfrozenxid != 0
            AND oid > 16384''')):
            postfix = '{}.{}.{}'.format('vacuum',
                                        'tables',
                                        line['table'])
            print(template.format(postfix, 'tx_before_wraparound_vacuum',
                                  line['value']))

        # Locks per lock mode
        for line in execute(conn, (
            'SELECT mode, count(1) as value FROM pg_locks GROUP BY mode'
        )):
            postfix = '{}.{}'.format('database',
                                     'locks')
            print(template.format(postfix, line['mode'], line['value']))

        # WAL archiver
        for line in execute(conn, (
            'SELECT * FROM pg_stat_archiver'
        )):
            postfix = '{}.{}.{}'.format('database',
                                        'wal',
                                        'archiver')
            for key, value in line.items():
                if value is not None:
                    print(template.format(postfix, key, value))

        # Replication replay lag per standby
        # NOTE(review): client_hostname can be NULL; .replace() would then
        # raise — confirm whether all standbys resolve to hostnames here.
        for line in execute(conn, (
            'SELECT client_hostname as hostname, '
            'EXTRACT(EPOCH FROM replay_lag) as replay_lag '
            'FROM pg_stat_replication'
        )):
            postfix = '{}.{}'.format('replication',
                                     'replay_lag',
                                     )
            print(template.format(postfix, line['hostname'].replace('.', '_'),
                                  line['replay_lag']))
def execute(conn, query, query_vars=()):
    """Run *query* on *conn* and return all rows as dictionaries."""
    cursor = conn.cursor(cursor_factory=RealDictCursor)
    with cursor:
        cursor.execute(query, query_vars)
        return cursor.fetchall()
# Run the collector only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| |
# -*- coding: utf-8 -*-
"""
Seamless Polymorphic Inheritance for Django Models
==================================================
Please see README.rst and DOCS.rst for further information.
Or on the Web:
http://chrisglass.github.com/django_polymorphic/
http://github.com/chrisglass/django_polymorphic
Copyright:
This code and affiliated files are (C) by Bert Constantin and individual contributors.
Please see LICENSE and AUTHORS for more information.
"""
from __future__ import absolute_import
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.utils import six
from .base import PolymorphicModelBase
from .manager import PolymorphicManager
from .query_translate import translate_polymorphic_Q_object
###################################################################################
### PolymorphicModel
class PolymorphicModel(six.with_metaclass(PolymorphicModelBase, models.Model)):
    """
    Abstract base class that provides polymorphic behaviour
    for any model directly or indirectly derived from it.

    For usage instructions & examples please see documentation.

    PolymorphicModel declares one field for internal use (polymorphic_ctype)
    and provides a polymorphic manager as the default manager
    (and as 'objects').

    PolymorphicModel overrides the save() and __init__ methods.

    If your derived class overrides any of these methods as well, then you
    need to take care that you correctly call the method of the superclass,
    like:

        super(YourClass, self).save(*args, **kwargs)
    """

    # for PolymorphicModelBase, so it can tell which models are polymorphic
    # and which are not (duck typing)
    polymorphic_model_marker = True

    # for PolymorphicQuery, True => an overloaded __repr__ with nicer
    # multi-line output is used by PolymorphicQuery
    polymorphic_query_multiline_output = False

    class Meta:
        abstract = True

    # avoid ContentType related field accessor clash (an error emitted by
    # model validation)
    polymorphic_ctype = models.ForeignKey(ContentType, null=True, editable=False,
                                          related_name='polymorphic_%(app_label)s.%(class)s_set')

    # some applications want to know the name of the fields that are added
    # to its models
    polymorphic_internal_model_fields = ['polymorphic_ctype']

    # Note that Django 1.5 removes these managers because the model is abstract.
    # They are pretended to be there by the metaclass in
    # PolymorphicModelBase.get_inherited_managers()
    objects = PolymorphicManager()
    base_objects = models.Manager()

    @classmethod
    def translate_polymorphic_Q_object(self_class, q):
        # Thin convenience wrapper around query_translate's function of
        # the same name, bound to this model class.
        return translate_polymorphic_Q_object(self_class, q)

    def pre_save_polymorphic(self):
        """Normally not needed.
        This function may be called manually in special use-cases. When the object
        is saved for the first time, we store its real class in polymorphic_ctype.
        When the object later is retrieved by PolymorphicQuerySet, it uses this
        field to figure out the real class of this object
        (used by PolymorphicQuerySet._get_real_instances)
        """
        if not self.polymorphic_ctype_id:
            self.polymorphic_ctype = ContentType.objects.get_for_model(self, for_concrete_model=False)

    def save(self, *args, **kwargs):
        """Overridden model save function which supports the polymorphism
        functionality (through pre_save_polymorphic)."""
        self.pre_save_polymorphic()
        return super(PolymorphicModel, self).save(*args, **kwargs)

    def get_real_instance_class(self):
        """
        Normally not needed.
        If a non-polymorphic manager (like base_objects) has been used to
        retrieve objects, then the real class/type of these objects may be
        determined using this method.
        """
        # the following line would be the easiest way to do this, but it
        # produces sql queries:
        #   return self.polymorphic_ctype.model_class()
        # so we use the following version, which uses the ContentType
        # manager cache.
        # Note that model_class() can return None for stale content types;
        # when the content type record still exists but no longer refers to
        # an existing model.
        try:
            return ContentType.objects.get_for_id(self.polymorphic_ctype_id).model_class()
        except AttributeError:
            # Django <1.6 workaround
            return None

    def get_real_concrete_instance_class_id(self):
        # Primary key of the ContentType of the concrete model behind the
        # real class (None when the content type is stale).
        model_class = self.get_real_instance_class()
        if model_class is None:
            return None
        return ContentType.objects.get_for_model(model_class, for_concrete_model=True).pk

    def get_real_concrete_instance_class(self):
        # Like get_real_instance_class(), but resolved through the
        # concrete (non-proxy) model's content type.
        model_class = self.get_real_instance_class()
        if model_class is None:
            return None
        return ContentType.objects.get_for_model(model_class, for_concrete_model=True).model_class()

    def get_real_instance(self):
        """Normally not needed.
        If a non-polymorphic manager (like base_objects) has been used to
        retrieve objects, then the complete object with it's real class/type
        and all fields may be retrieved with this method.
        Each method call executes one db query (if necessary)."""
        real_model = self.get_real_instance_class()
        if real_model == self.__class__:
            return self
        return real_model.objects.get(pk=self.pk)

    def __init__(self, *args, **kwargs):
        """Replace Django's inheritance accessor member functions for our model
        (self.__class__) with our own versions.
        We monkey patch them until a patch can be added to Django
        (which would probably be very small and make all of this obsolete).

        If we have inheritance of the form ModelA -> ModelB ->ModelC then
        Django creates accessors like this:
        - ModelA: modelb
        - ModelB: modela_ptr, modelb, modelc
        - ModelC: modela_ptr, modelb, modelb_ptr, modelc

        These accessors allow Django (and everyone else) to travel up and down
        the inheritance tree for the db object at hand.

        The original Django accessors use our polymorphic manager.
        But they should not. So we replace them with our own accessors that use
        our appropriate base_objects manager.
        """
        super(PolymorphicModel, self).__init__(*args, **kwargs)

        # Patch the accessors only once per class.
        if self.__class__.polymorphic_super_sub_accessors_replaced:
            return
        self.__class__.polymorphic_super_sub_accessors_replaced = True

        def create_accessor_function_for_model(model, accessor_name):
            # Closure factory: binds `model` so the accessor fetches via the
            # non-polymorphic base_objects manager.
            def accessor_function(self):
                attr = model.base_objects.get(pk=self.pk)
                return attr
            return accessor_function

        subclasses_and_superclasses_accessors = self._get_inheritance_relation_fields_and_models()
        from django.db.models.fields.related import SingleRelatedObjectDescriptor, ReverseSingleRelatedObjectDescriptor
        for name, model in subclasses_and_superclasses_accessors.items():
            orig_accessor = getattr(self.__class__, name, None)
            if type(orig_accessor) in [SingleRelatedObjectDescriptor, ReverseSingleRelatedObjectDescriptor]:
                setattr(self.__class__, name, property(create_accessor_function_for_model(model, name)))

    def _get_inheritance_relation_fields_and_models(self):
        """helper function for __init__:
        determine names of all Django inheritance accessor member functions
        for type(self)"""

        def add_model(model, as_ptr, result):
            # Accessor names are the lower-cased model name, with a
            # '_ptr' suffix for parent-link accessors.
            name = model.__name__.lower()
            if as_ptr:
                name += '_ptr'
            result[name] = model

        def add_model_if_regular(model, as_ptr, result):
            # Skip the abstract bases and the model's own class.
            if (issubclass(model, models.Model)
                    and model != models.Model
                    and model != self.__class__
                    and model != PolymorphicModel):
                add_model(model, as_ptr, result)

        def add_all_super_models(model, result):
            add_model_if_regular(model, True, result)
            for b in model.__bases__:
                add_all_super_models(b, result)

        def add_all_sub_models(model, result):
            for b in model.__subclasses__():
                add_model_if_regular(b, False, result)

        result = {}
        add_all_super_models(self.__class__, result)
        add_all_sub_models(self.__class__, result)
        return result
| |
"""Implementation of JSONDecoder
"""
from __future__ import absolute_import
import re
import sys
import struct
from .compat import fromhex, b, u, text_type, binary_type, PY3, unichr
from .scanner import make_scanner
def _import_c_scanstring():
try:
from ._speedups import scanstring
return scanstring
except ImportError:
return None
# C-accelerated scanstring, or None when _speedups is unavailable.
c_scanstring = _import_c_scanstring()

__all__ = ['JSONDecoder']

# Flags shared by all regexes in this module.
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
    """Build (nan, inf, -inf) portably from raw IEEE-754 big-endian bits."""
    ieee_bits = fromhex('7FF80000000000007FF0000000000000')
    # The struct module in Python 2.4 would get frexp() out of range here
    # when an endian is specified in the format string (fixed in 2.5+),
    # so byte-swap each half manually on little-endian machines instead.
    if sys.byteorder != 'big':
        ieee_bits = ieee_bits[:8][::-1] + ieee_bits[8:][::-1]
    nan = struct.unpack('d', ieee_bits[:8])[0]
    inf = struct.unpack('d', ieee_bits[8:])[0]
    return nan, inf, -inf
# Module-level IEEE-754 special values used by the parser constants below.
NaN, PosInf, NegInf = _floatconstants()
class JSONDecodeError(ValueError):
    """Subclass of ValueError with the following additional properties:

    msg: The unformatted error message
    doc: The JSON document being parsed
    pos: The start index of doc where parsing failed
    end: The end index of doc where parsing failed (may be None)
    lineno: The line corresponding to pos
    colno: The column corresponding to pos
    endlineno: The line corresponding to end (may be None)
    endcolno: The column corresponding to end (may be None)
    """
    def __init__(self, msg, doc, pos, end=None):
        ValueError.__init__(self, errmsg(msg, doc, pos, end=end))
        self.msg = msg
        self.doc = doc
        self.pos = pos
        self.end = end
        self.lineno, self.colno = linecol(doc, pos)
        self.endlineno, self.endcolno = (
            linecol(doc, end) if end is not None else (None, None))
def linecol(doc, pos):
    """Return the 1-based line and the column of offset *pos* in *doc*.

    On the first line the column equals *pos* (0-based); on later lines
    the character right after the newline is column 1 (original
    simplejson behaviour, preserved exactly).
    """
    lineno = doc.count('\n', 0, pos) + 1
    if lineno == 1:
        return lineno, pos
    return lineno, pos - doc.rindex('\n', 0, pos)
def errmsg(msg, doc, pos, end=None):
    """Format a parse-error message with line/column information.

    Note that this function is also called from the _speedups extension,
    so its signature must not change.
    """
    lineno, colno = linecol(doc, pos)
    if end is None:
        return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
    endlineno, endcolno = linecol(doc, end)
    return ('%s: line %d column %d - line %d column %d (char %d - %d)'
            % (msg, lineno, colno, endlineno, endcolno, pos, end))
# JSON extensions for non-finite floats, accepted by the scanner.
_CONSTANTS = {
    '-Infinity': NegInf,
    'Infinity': PosInf,
    'NaN': NaN,
}

# Matches a run of plain characters up to (and including) the next
# double quote, backslash or control character.
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
# Lookup table for single-character backslash escapes.
BACKSLASH = {
    '"': u('"'), '\\': u('\u005c'), '/': u('/'),
    'b': u('\b'), 'f': u('\f'), 'n': u('\n'), 'r': u('\r'), 't': u('\t'),
}

DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True,
                  _b=BACKSLASH, _m=STRINGCHUNK.match, _join=u('').join,
                  _PY3=PY3, _maxunicode=sys.maxunicode):
    """Scan the string s for a JSON string. End is the index of the
    character in s after the quote that started the JSON string.
    Unescapes all valid JSON string escape sequences and raises ValueError
    on attempt to decode an invalid string. If strict is False then literal
    control characters are allowed in the string.

    Returns a tuple of the decoded string and the index of the character in s
    after the end quote.

    (The _b/_m/_join/_PY3/_maxunicode defaults are deliberate locals-binding
    micro-optimizations; do not pass them explicitly.)
    """
    if encoding is None:
        encoding = DEFAULT_ENCODING
    chunks = []
    _append = chunks.append
    begin = end - 1
    while 1:
        chunk = _m(s, end)
        if chunk is None:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
        if content:
            if not _PY3 and not isinstance(content, text_type):
                content = text_type(content, encoding)
            _append(content)
        # Terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows
        if terminator == '"':
            break
        elif terminator != '\\':
            if strict:
                msg = "Invalid control character %r at" % (terminator,)
                raise JSONDecodeError(msg, s, end)
            else:
                # Non-strict mode: pass the control character through.
                _append(terminator)
                continue
        try:
            esc = s[end]
        except IndexError:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        # If not a unicode escape sequence, must be in the lookup table
        if esc != 'u':
            try:
                char = _b[esc]
            except KeyError:
                msg = "Invalid \\escape: " + repr(esc)
                raise JSONDecodeError(msg, s, end)
            end += 1
        else:
            # Unicode escape sequence: \uXXXX
            esc = s[end + 1:end + 5]
            next_end = end + 5
            if len(esc) != 4:
                msg = "Invalid \\uXXXX escape"
                raise JSONDecodeError(msg, s, end)
            try:
                uni = int(esc, 16)
            except ValueError:
                msg = "Invalid \\uXXXX escape"
                raise JSONDecodeError(msg, s, end)
            # Check for surrogate pair on UCS-4 systems: a high surrogate
            # must be followed by a \uXXXX low surrogate, which together
            # encode one supplementary-plane code point.
            if _maxunicode > 65535:
                unimask = uni & 0xfc00
                if unimask == 0xd800:
                    msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
                    if not s[end + 5:end + 7] == '\\u':
                        raise JSONDecodeError(msg, s, end)
                    esc2 = s[end + 7:end + 11]
                    if len(esc2) != 4:
                        raise JSONDecodeError(msg, s, end)
                    try:
                        uni2 = int(esc2, 16)
                    except ValueError:
                        raise JSONDecodeError(msg, s, end)
                    if uni2 & 0xfc00 != 0xdc00:
                        msg = "Unpaired high surrogate"
                        raise JSONDecodeError(msg, s, end)
                    uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
                    next_end += 6
                elif unimask == 0xdc00:
                    msg = "Unpaired low surrogate"
                    raise JSONDecodeError(msg, s, end)
            char = unichr(uni)
            end = next_end
        # Append the unescaped character
        _append(char)
    return _join(chunks), end
# Prefer the C extension's scanstring when available; fall back to the
# pure-Python implementation defined above.
scanstring = c_scanstring or py_scanstring

# Pre-compiled whitespace matcher plus the raw character set; both are used
# by the parsing functions below to skip insignificant whitespace quickly.
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject(state, encoding, strict, scan_once, object_hook,
        object_pairs_hook, memo=None,
        _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON object from ``state`` = (string, index just past '{').

    Returns ``(obj, end)`` where ``obj`` is the decoded object (a dict, or
    the result of ``object_hook`` / ``object_pairs_hook``) and ``end`` is
    the index just past the closing '}'.  Raises ``JSONDecodeError`` on
    malformed input.  ``_w`` and ``_ws`` are default-argument caches of the
    whitespace matcher / character set for speed.
    """
    (s, end) = state
    # Backwards compatibility
    if memo is None:
        memo = {}
    # Interning repeated key strings through the memo dict saves memory on
    # documents with many identical keys.
    memo_get = memo.setdefault
    pairs = []
    # Use a slice to prevent IndexError from being raised, the following
    # check will raise a more specific ValueError if the string is empty
    nextchar = s[end:end + 1]
    # Normally we expect nextchar == '"'
    if nextchar != '"':
        if nextchar in _ws:
            end = _w(s, end).end()
            nextchar = s[end:end + 1]
        # Trivial empty object
        if nextchar == '}':
            if object_pairs_hook is not None:
                result = object_pairs_hook(pairs)
                return result, end + 1
            pairs = {}
            if object_hook is not None:
                pairs = object_hook(pairs)
            return pairs, end + 1
        elif nextchar != '"':
            raise JSONDecodeError(
                "Expecting property name enclosed in double quotes",
                s, end)
    end += 1
    while True:
        key, end = scanstring(s, end, encoding, strict)
        key = memo_get(key, key)
        # To skip some function call overhead we optimize the fast paths where
        # the JSON key separator is ": " or just ":".
        if s[end:end + 1] != ':':
            end = _w(s, end).end()
            if s[end:end + 1] != ':':
                raise JSONDecodeError("Expecting ':' delimiter", s, end)
        end += 1
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise JSONDecodeError("Expecting object", s, end)
        pairs.append((key, value))
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end = _w(s, end + 1).end()
                nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar == '}':
            break
        elif nextchar != ',':
            raise JSONDecodeError("Expecting ',' delimiter", s, end - 1)
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end += 1
                nextchar = s[end]
                if nextchar in _ws:
                    end = _w(s, end + 1).end()
                    nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar != '"':
            raise JSONDecodeError(
                "Expecting property name enclosed in double quotes",
                s, end - 1)
    if object_pairs_hook is not None:
        result = object_pairs_hook(pairs)
        return result, end
    pairs = dict(pairs)
    if object_hook is not None:
        pairs = object_hook(pairs)
    return pairs, end
def JSONArray(state, scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON array from ``state`` = (string, index just past '[').

    Returns ``(values, end)`` where ``values`` is a list and ``end`` is the
    index just past the closing ']'.  Raises ``JSONDecodeError`` on
    malformed input.
    """
    (s, end) = state
    values = []
    nextchar = s[end:end + 1]
    if nextchar in _ws:
        end = _w(s, end + 1).end()
        nextchar = s[end:end + 1]
    # Look-ahead for trivial empty array
    if nextchar == ']':
        return values, end + 1
    # Bind the bound method once; it is called in the hot loop below.
    _append = values.append
    while True:
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise JSONDecodeError("Expecting object", s, end)
        _append(value)
        nextchar = s[end:end + 1]
        if nextchar in _ws:
            end = _w(s, end + 1).end()
            nextchar = s[end:end + 1]
        end += 1
        if nextchar == ']':
            break
        elif nextchar != ',':
            raise JSONDecodeError("Expecting ',' delimiter", s, end)
        # Skip whitespace following the comma before the next element.
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
    return values, end
class JSONDecoder(object):
    """Decoder for JSON documents (see http://json.org).

    Default translations:

        JSON object   -> dict
        JSON array    -> list
        JSON string   -> unicode
        JSON number   -> int/long or float
        true / false  -> True / False
        null          -> None

    ``NaN``, ``Infinity`` and ``-Infinity`` are also accepted and mapped
    to the corresponding floats, although they fall outside the JSON spec.
    """

    def __init__(self, encoding=None, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, strict=True,
            object_pairs_hook=None):
        """
        *encoding* is the codec used to interpret :class:`str` input
        (``'utf-8'`` when not given); it must be a superset of ASCII and
        has no effect on :class:`unicode` input.

        *object_hook*, if given, is called with every decoded JSON object
        (a dict) and its return value is used instead — handy for custom
        deserialization such as JSON-RPC class hinting.
        *object_pairs_hook* instead receives the ordered list of (key,
        value) pairs (e.g. to build an ``OrderedDict``) and takes priority
        over *object_hook* when both are supplied.

        *parse_float*, *parse_int* and *parse_constant* override the
        conversion of JSON floats, ints, and the constants ``'Infinity'``,
        ``'-Infinity'`` and ``'NaN'`` respectively; the defaults are
        ``float``, ``int`` and a lookup in ``_CONSTANTS``.

        *strict* (default ``True``) makes unescaped control characters
        inside strings a parse error; pass ``False`` to allow them.
        """
        self.encoding = DEFAULT_ENCODING if encoding is None else encoding
        # Hooks controlling how containers and scalars are materialized.
        self.object_hook = object_hook
        self.object_pairs_hook = object_pairs_hook
        self.parse_float = parse_float or float
        self.parse_int = parse_int or int
        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
        self.strict = strict
        self.parse_object = JSONObject
        self.parse_array = JSONArray
        self.parse_string = scanstring
        self.memo = {}
        # Built last: make_scanner reads the attributes configured above.
        self.scan_once = make_scanner(self)

    def decode(self, s, _w=WHITESPACE.match, _PY3=PY3):
        """Return the Python representation of the JSON document *s*,
        rejecting any non-whitespace data after the document."""
        if _PY3 and isinstance(s, binary_type):
            s = s.decode(self.encoding)
        obj, end = self.raw_decode(s)
        tail = _w(s, end).end()
        if tail != len(s):
            raise JSONDecodeError("Extra data", s, tail, len(s))
        return obj

    def raw_decode(self, s, idx=0, _w=WHITESPACE.match, _PY3=PY3):
        """Decode the JSON document beginning at ``s[idx]``.

        Returns ``(obj, end)`` where ``end`` indexes just past the decoded
        document; anything after that point is ignored, which makes this
        usable on strings with trailing extraneous data.  Raises
        ``TypeError`` for bytes input on Python 3 and ``JSONDecodeError``
        when no document is found.
        """
        if _PY3 and not isinstance(s, text_type):
            raise TypeError("Input string must be text, not bytes")
        start = _w(s, idx).end()
        try:
            return self.scan_once(s, idx=start)
        except StopIteration:
            raise JSONDecodeError("No JSON object could be decoded", s, idx)
| |
# Copyright (C) 2004 Python Software Foundation
# Author: barry@python.org (Barry Warsaw)
# License: http://www.opensource.org/licenses/PythonSoftFoundation.php
import unittest
from string import Template
class Bag:
    """Empty attribute holder; the tests hang arbitrary attributes off it."""
class Mapping:
    """Mapping whose dotted keys resolve as chained attribute lookups.

    ``m['a.b.c']`` walks ``m.a.b.c``; a missing attribute anywhere along
    the path raises ``KeyError`` with the full key, mimicking a dict.
    """

    def __getitem__(self, name):
        target = self
        for attr in name.split('.'):
            try:
                target = getattr(target, attr)
            except AttributeError:
                raise KeyError(name)
        return target
class TestTemplate(unittest.TestCase):
    """Regression tests for string.Template (PEP 292 $-substitution),
    including subclass overrides of delimiter, idpattern and pattern."""
    def test_regular_templates(self):
        s = Template('$who likes to eat a bag of $what worth $$100')
        self.assertEqual(s.substitute(dict(who='tim', what='ham')),
                         'tim likes to eat a bag of ham worth $100')
        self.assertRaises(KeyError, s.substitute, dict(who='tim'))
    def test_regular_templates_with_braces(self):
        s = Template('$who likes ${what} for ${meal}')
        d = dict(who='tim', what='ham', meal='dinner')
        self.assertEqual(s.substitute(d), 'tim likes ham for dinner')
        self.assertRaises(KeyError, s.substitute,
                          dict(who='tim', what='ham'))
    def test_escapes(self):
        # $$ collapses to a literal $.
        eq = self.assertEqual
        s = Template('$who likes to eat a bag of $$what worth $$100')
        eq(s.substitute(dict(who='tim', what='ham')),
           'tim likes to eat a bag of $what worth $100')
        s = Template('$who likes $$')
        eq(s.substitute(dict(who='tim', what='ham')), 'tim likes $')
    def test_percents(self):
        # %-style placeholders are not special to Template.
        eq = self.assertEqual
        s = Template('%(foo)s $foo ${foo}')
        d = dict(foo='baz')
        eq(s.substitute(d), '%(foo)s baz baz')
        eq(s.safe_substitute(d), '%(foo)s baz baz')
    def test_stringification(self):
        # Non-string values are converted with str().
        eq = self.assertEqual
        s = Template('tim has eaten $count bags of ham today')
        d = dict(count=7)
        eq(s.substitute(d), 'tim has eaten 7 bags of ham today')
        eq(s.safe_substitute(d), 'tim has eaten 7 bags of ham today')
        s = Template('tim has eaten ${count} bags of ham today')
        eq(s.substitute(d), 'tim has eaten 7 bags of ham today')
    def test_tupleargs(self):
        eq = self.assertEqual
        s = Template('$who ate ${meal}')
        d = dict(who=('tim', 'fred'), meal=('ham', 'kung pao'))
        eq(s.substitute(d), "('tim', 'fred') ate ('ham', 'kung pao')")
        eq(s.safe_substitute(d), "('tim', 'fred') ate ('ham', 'kung pao')")
    def test_SafeTemplate(self):
        # safe_substitute leaves unknown placeholders intact.
        eq = self.assertEqual
        s = Template('$who likes ${what} for ${meal}')
        eq(s.safe_substitute(dict(who='tim')), 'tim likes ${what} for ${meal}')
        eq(s.safe_substitute(dict(what='ham')), '$who likes ham for ${meal}')
        eq(s.safe_substitute(dict(what='ham', meal='dinner')),
           '$who likes ham for dinner')
        eq(s.safe_substitute(dict(who='tim', what='ham')),
           'tim likes ham for ${meal}')
        eq(s.safe_substitute(dict(who='tim', what='ham', meal='dinner')),
           'tim likes ham for dinner')
    def test_invalid_placeholders(self):
        raises = self.assertRaises
        s = Template('$who likes $')
        raises(ValueError, s.substitute, dict(who='tim'))
        s = Template('$who likes ${what)')
        raises(ValueError, s.substitute, dict(who='tim'))
        s = Template('$who likes $100')
        raises(ValueError, s.substitute, dict(who='tim'))
    def test_idpattern_override(self):
        # A looser idpattern allows dotted names, resolved by Mapping above.
        class PathPattern(Template):
            idpattern = r'[_a-z][._a-z0-9]*'
        m = Mapping()
        m.bag = Bag()
        m.bag.foo = Bag()
        m.bag.foo.who = 'tim'
        m.bag.what = 'ham'
        s = PathPattern('$bag.foo.who likes to eat a bag of $bag.what')
        self.assertEqual(s.substitute(m), 'tim likes to eat a bag of ham')
    def test_pattern_override(self):
        # Replacing the whole pattern switches the delimiter to '@'.
        class MyPattern(Template):
            pattern = r"""
            (?P<escaped>@{2}) |
            @(?P<named>[_a-z][._a-z0-9]*) |
            @{(?P<braced>[_a-z][._a-z0-9]*)} |
            (?P<invalid>@)
            """
        m = Mapping()
        m.bag = Bag()
        m.bag.foo = Bag()
        m.bag.foo.who = 'tim'
        m.bag.what = 'ham'
        s = MyPattern('@bag.foo.who likes to eat a bag of @bag.what')
        self.assertEqual(s.substitute(m), 'tim likes to eat a bag of ham')
        # A pattern with unexpected group names must be rejected.
        class BadPattern(Template):
            pattern = r"""
            (?P<badname>.*) |
            (?P<escaped>@{2}) |
            @(?P<named>[_a-z][._a-z0-9]*) |
            @{(?P<braced>[_a-z][._a-z0-9]*)} |
            (?P<invalid>@) |
            """
        s = BadPattern('@bag.foo.who likes to eat a bag of @bag.what')
        self.assertRaises(ValueError, s.substitute, {})
        self.assertRaises(ValueError, s.safe_substitute, {})
    def test_braced_override(self):
        # Custom braced form @@name@@ while keeping the $ delimiter.
        class MyTemplate(Template):
            pattern = r"""
            \$(?:
            (?P<escaped>$) |
            (?P<named>[_a-z][_a-z0-9]*) |
            @@(?P<braced>[_a-z][_a-z0-9]*)@@ |
            (?P<invalid>) |
            )
            """
        tmpl = 'PyCon in $@@location@@'
        t = MyTemplate(tmpl)
        self.assertRaises(KeyError, t.substitute, {})
        val = t.substitute({'location': 'Cleveland'})
        self.assertEqual(val, 'PyCon in Cleveland')
    def test_braced_override_safe(self):
        class MyTemplate(Template):
            pattern = r"""
            \$(?:
            (?P<escaped>$) |
            (?P<named>[_a-z][_a-z0-9]*) |
            @@(?P<braced>[_a-z][_a-z0-9]*)@@ |
            (?P<invalid>) |
            )
            """
        tmpl = 'PyCon in $@@location@@'
        t = MyTemplate(tmpl)
        self.assertEqual(t.safe_substitute(), tmpl)
        val = t.safe_substitute({'location': 'Cleveland'})
        self.assertEqual(val, 'PyCon in Cleveland')
    def test_unicode_values(self):
        s = Template('$who likes $what')
        d = dict(who='t\xffm', what='f\xfe\fed')
        self.assertEqual(s.substitute(d), 't\xffm likes f\xfe\x0ced')
    def test_keyword_arguments(self):
        eq = self.assertEqual
        s = Template('$who likes $what')
        eq(s.substitute(who='tim', what='ham'), 'tim likes ham')
        eq(s.substitute(dict(who='tim'), what='ham'), 'tim likes ham')
        # Keyword arguments win over the positional mapping.
        eq(s.substitute(dict(who='fred', what='kung pao'),
                        who='tim', what='ham'),
           'tim likes ham')
        s = Template('the mapping is $mapping')
        eq(s.substitute(dict(foo='none'), mapping='bozo'),
           'the mapping is bozo')
        eq(s.substitute(dict(mapping='one'), mapping='two'),
           'the mapping is two')
    def test_keyword_arguments_safe(self):
        eq = self.assertEqual
        raises = self.assertRaises
        s = Template('$who likes $what')
        eq(s.safe_substitute(who='tim', what='ham'), 'tim likes ham')
        eq(s.safe_substitute(dict(who='tim'), what='ham'), 'tim likes ham')
        eq(s.safe_substitute(dict(who='fred', what='kung pao'),
                             who='tim', what='ham'),
           'tim likes ham')
        s = Template('the mapping is $mapping')
        eq(s.safe_substitute(dict(foo='none'), mapping='bozo'),
           'the mapping is bozo')
        eq(s.safe_substitute(dict(mapping='one'), mapping='two'),
           'the mapping is two')
        # At most one positional mapping argument is accepted.
        d = dict(mapping='one')
        raises(TypeError, s.substitute, d, {})
        raises(TypeError, s.safe_substitute, d, {})
    def test_delimiter_override(self):
        eq = self.assertEqual
        raises = self.assertRaises
        class AmpersandTemplate(Template):
            delimiter = '&'
        s = AmpersandTemplate('this &gift is for &{who} &&')
        eq(s.substitute(gift='bud', who='you'), 'this bud is for you &')
        raises(KeyError, s.substitute)
        eq(s.safe_substitute(gift='bud', who='you'), 'this bud is for you &')
        eq(s.safe_substitute(), 'this &gift is for &{who} &')
        s = AmpersandTemplate('this &gift is for &{who} &')
        raises(ValueError, s.substitute, dict(gift='bud', who='you'))
        eq(s.safe_substitute(), 'this &gift is for &{who} &')
        class PieDelims(Template):
            delimiter = '@'
        s = PieDelims('@who likes to eat a bag of @{what} worth $100')
        self.assertEqual(s.substitute(dict(who='tim', what='ham')),
                         'tim likes to eat a bag of ham worth $100')
def test_main():
    """Entry point used by the regression-test harness."""
    from test import support
    support.run_unittest(TestTemplate)
# Allow running this test file directly as a script.
if __name__ == '__main__':
    test_main()
| |
#!/usr/bin/env python
#
# Use the raw transactions API to spend eternitys received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a eternityd or Eternity-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
    """Abort if the json library loses precision on 8-decimal BTC amounts."""
    value = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(value)))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the platform-specific default Eternity Core data directory."""
    system = platform.system()
    if system == "Darwin":
        return os.path.expanduser("~/Library/Application Support/EternityCore/")
    if system == "Windows":
        return os.path.join(os.environ['APPDATA'], "EternityCore")
    # Everything else (Linux, BSD, ...) uses a dot-directory in $HOME.
    return os.path.expanduser("~/.eternitycore")
def read_bitcoin_config(dbdir):
    """Read the eternity.conf file from dbdir, returns dictionary of settings"""
    # ConfigParser is the Python 2 module name (renamed configparser in
    # Python 3); this script is Python 2 only — see iteritems() in main().
    from ConfigParser import SafeConfigParser
    class FakeSecHead(object):
        # Wraps the raw file object and injects a fake "[all]" section
        # header so SafeConfigParser can parse the section-less conf file.
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            if self.sechead:
                # Serve the fake header exactly once (try/finally clears it
                # after the return value is computed).
                try: return self.sechead
                finally: self.sechead = None
            else:
                s = self.fp.readline()
                # Strip trailing '#' comments, keeping the newline.
                if s.find('#') != -1:
                    s = s[0:s.find('#')].strip() +"\n"
                return s
    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "eternity.conf"))))
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a Eternity Core JSON-RPC server.

    Builds the connection URL from rpcuser/rpcpassword/rpcport in *config*,
    issues a getmininginfo call to validate the connection and the testnet
    setting, and exits the process with status 1 on any failure.
    """
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
    if 'rpcport' not in config:
        config['rpcport'] = 14854 if testnet else 4854
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the eternityd we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except Exception:
        # BUG FIX: this was a bare ``except:``, which also caught the
        # SystemExit raised by the testnet-mismatch sys.exit(1) above and
        # printed a second, misleading "Error connecting" message.
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
def unlock_wallet(eternityd):
    """Ensure the wallet is unlocked, prompting for the passphrase if needed.

    Returns True when the wallet is unencrypted, or when it is (still)
    unlocked after the optional passphrase attempt; False otherwise, so the
    caller can loop until the passphrase is right.
    """
    info = eternityd.getinfo()
    if 'unlocked_until' not in info:
        return True # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            eternityd.walletpassphrase(passphrase, 5)
        except Exception:
            # BUG FIX: narrowed from a bare ``except:`` so Ctrl-C during the
            # prompt aborts the script instead of being reported as a wrong
            # passphrase.
            sys.stderr.write("Wrong passphrase\n")
    info = eternityd.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(eternityd):
    """Summarize all unspent outputs, grouped by address.

    Returns {address: {"total": value, "outputs": [unspent, ...],
    "account": name}}.  Only ordinary pay-to-pubkey-hash and
    pay-to-script-hash outputs are considered; exotic script types are
    skipped.
    """
    account_by_address = {}
    for info in eternityd.listreceivedbyaddress(0):
        account_by_address[info["address"]] = info["account"]
    summary = {}
    for output in eternityd.listunspent(0):
        # listunspent doesn't report addresses, so fetch the full
        # transaction to recover them.
        rawtx = eternityd.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]
        if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
            continue
        address = pk["addresses"][0]
        entry = summary.get(address)
        if entry is None:
            summary[address] = {
                "total" : vout["value"],
                "outputs" : [output],
                "account" : account_by_address.get(address, "")
                }
        else:
            entry["total"] += vout["value"]
            entry["outputs"].append(output)
    return summary
def select_coins(needed, inputs):
    """Greedily pick coins from *inputs* until *needed* is covered.

    Returns (outputs, change): outputs is a list of {"txid", "vout"} dicts
    suitable for createrawtransaction, and change is the surplus (negative
    when the available inputs are insufficient).
    """
    picked = []
    gathered = Decimal("0.0")
    for candidate in inputs:
        if gathered >= needed:
            break
        picked.append({ "txid":candidate["txid"], "vout":candidate["vout"]})
        gathered += candidate["amount"]
    return (picked, gathered-needed)
def create_tx(eternityd, fromaddresses, toaddress, amount, fee):
    """Build and sign a raw transaction spending from *fromaddresses*.

    Sends *amount* to *toaddress*, returns change (when larger than
    BASE_FEE) to the last from-address, and exits the process if funds are
    insufficient or signing fails.  Returns the signed transaction hex.
    """
    all_coins = list_available(eternityd)
    total_available = Decimal("0.0")
    needed = amount+fee
    potential_inputs = []
    # Gather every unspent output belonging to the requested addresses.
    for addr in fromaddresses:
        if addr not in all_coins:
            continue
        potential_inputs.extend(all_coins[addr]["outputs"])
        total_available += all_coins[addr]["total"]
    if total_available < needed:
        sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
        sys.exit(1)
    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, I'm casting amounts to float before sending them to eternityd.
    #
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE: # don't bother with zero or tiny change
        change_address = fromaddresses[-1]
        if change_address in outputs:
            outputs[change_address] += float(change_amount)
        else:
            outputs[change_address] = float(change_amount)
    rawtx = eternityd.createrawtransaction(inputs, outputs)
    signed_rawtx = eternityd.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    txdata = signed_rawtx["hex"]
    return txdata
def compute_amount_in(eternityd, txinfo):
    """Sum the values of all previous outputs spent by *txinfo*'s inputs."""
    total = Decimal("0.0")
    for vin in txinfo['vin']:
        # Each input references an output of an earlier transaction; look
        # it up to learn its value.
        prev_tx = eternityd.getrawtransaction(vin['txid'], 1)
        total += prev_tx['vout'][vin['vout']]['value']
    return total
def compute_amount_out(txinfo):
    """Sum the values of all outputs of the decoded transaction *txinfo*."""
    return sum((vout['value'] for vout in txinfo['vout']), Decimal("0.0"))
def sanity_test_fee(eternityd, txdata_hex, max_fee):
    """Decode *txdata_hex* and exit(1) if its fee looks unreasonable.

    Rejects: a fee above *max_fee*; a below-BASE_FEE fee on a transaction
    larger than 1000 bytes; a below-BASE_FEE fee on a tiny-amount
    transaction.
    """
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = eternityd.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(eternityd, txinfo)
        total_out = compute_amount_out(txinfo)
        # BUG FIX: the fee checks below referenced an undefined name 'fee'
        # (a NameError whenever those branches ran); the actual fee is the
        # difference between inputs and outputs.
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
        tx_size = len(txdata_hex)/2  # two hex chars per byte
        kb = tx_size/1000  # integer division rounds down (Python 2)
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction
    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    """CLI entry point: with no --amount, list available funds per address;
    otherwise build, sanity-check and (unless --dry_run) broadcast a
    transaction."""
    import optparse
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get eternitys from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send eternitys to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of eternity.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")
    (options, args) = parser.parse_args()
    check_json_precision()
    config = read_bitcoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    eternityd = connect_JSON(config)
    if options.amount is None:
        # No amount given: just report what is spendable, per address.
        address_summary = list_available(eternityd)
        # Python 2 only: dict.iteritems().
        for address,info in address_summary.iteritems():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(eternityd) == False:
            pass # Keep asking for passphrase until they get it right
        txdata = create_tx(eternityd, options.fromaddresses.split(","), options.to, amount, fee)
        # Cap the acceptable fee at 1% of the amount being sent.
        sanity_test_fee(eternityd, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = eternityd.sendrawtransaction(txdata)
            print(txid)
# Script entry point.
if __name__ == '__main__':
    main()
| |
"""Tests for chebyshev module.
"""
from __future__ import division
import numpy as np
import numpy.polynomial.chebyshev as cheb
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
TestCase, assert_almost_equal, assert_raises,
assert_equal, assert_, run_module_suite)
def trim(x):
    """Trim trailing Chebyshev coefficients smaller than 1e-6."""
    return cheb.chebtrim(x, tol=1e-6)
# Power-series (plain polynomial basis) coefficients of the Chebyshev
# polynomials T_0 .. T_9, lowest degree first; used as reference values
# by the evaluation tests below.
T0 = [ 1]
T1 = [ 0, 1]
T2 = [-1, 0, 2]
T3 = [ 0, -3, 0, 4]
T4 = [ 1, 0, -8, 0, 8]
T5 = [ 0, 5, 0, -20, 0, 16]
T6 = [-1, 0, 18, 0, -48, 0, 32]
T7 = [ 0, -7, 0, 56, 0, -112, 0, 64]
T8 = [ 1, 0, -32, 0, 160, 0, -256, 0, 128]
T9 = [ 0, 9, 0, -120, 0, 432, 0, -576, 0, 256]
Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]
class TestPrivate(TestCase):
    """Round-trip checks for the private c-series <-> z-series converters."""

    def test__cseries_to_zseries(self):
        for size in range(5):
            cseries = np.array([2] + [1]*size, np.double)
            expected = np.array([.5]*size + [2] + [.5]*size, np.double)
            assert_equal(cheb._cseries_to_zseries(cseries), expected)

    def test__zseries_to_cseries(self):
        for size in range(5):
            zseries = np.array([.5]*size + [2] + [.5]*size, np.double)
            expected = np.array([2] + [1]*size, np.double)
            assert_equal(cheb._zseries_to_cseries(zseries), expected)
class TestConstants(TestCase):
    """The module-level Chebyshev constants hold their documented values."""

    def test_chebdomain(self):
        assert_equal(cheb.chebdomain, [-1, 1])

    def test_chebzero(self):
        assert_equal(cheb.chebzero, [0])

    def test_chebone(self):
        assert_equal(cheb.chebone, [1])

    def test_chebx(self):
        assert_equal(cheb.chebx, [0, 1])
class TestArithmetic(TestCase) :
    """Series arithmetic checked against Chebyshev basis identities."""
    def test_chebadd(self) :
        # T_i + T_j has coefficient 1 at i and at j (2 when i == j).
        for i in range(5) :
            for j in range(5) :
                msg = "At i=%d, j=%d" % (i,j)
                tgt = np.zeros(max(i,j) + 1)
                tgt[i] += 1
                tgt[j] += 1
                res = cheb.chebadd([0]*i + [1], [0]*j + [1])
                assert_equal(trim(res), trim(tgt), err_msg=msg)
    def test_chebsub(self) :
        for i in range(5) :
            for j in range(5) :
                msg = "At i=%d, j=%d" % (i,j)
                tgt = np.zeros(max(i,j) + 1)
                tgt[i] += 1
                tgt[j] -= 1
                res = cheb.chebsub([0]*i + [1], [0]*j + [1])
                assert_equal(trim(res), trim(tgt), err_msg=msg)
    def test_chebmulx(self):
        # x*T_i = (T_{i-1} + T_{i+1})/2 for i >= 1; x*T_0 = T_1.
        assert_equal(cheb.chebmulx([0]), [0])
        assert_equal(cheb.chebmulx([1]), [0,1])
        for i in range(1, 5):
            ser = [0]*i + [1]
            tgt = [0]*(i - 1) + [.5, 0, .5]
            assert_equal(cheb.chebmulx(ser), tgt)
    def test_chebmul(self) :
        # 2*T_i*T_j = T_{i+j} + T_|i-j|.
        for i in range(5) :
            for j in range(5) :
                msg = "At i=%d, j=%d" % (i,j)
                tgt = np.zeros(i + j + 1)
                tgt[i + j] += .5
                tgt[abs(i - j)] += .5
                res = cheb.chebmul([0]*i + [1], [0]*j + [1])
                assert_equal(trim(res), trim(tgt), err_msg=msg)
    def test_chebdiv(self) :
        # quotient*divisor + remainder must reconstruct the dividend.
        for i in range(5) :
            for j in range(5) :
                msg = "At i=%d, j=%d" % (i,j)
                ci = [0]*i + [1]
                cj = [0]*j + [1]
                tgt = cheb.chebadd(ci, cj)
                quo, rem = cheb.chebdiv(tgt, ci)
                res = cheb.chebadd(cheb.chebmul(quo, ci), rem)
                assert_equal(trim(res), trim(tgt), err_msg=msg)
class TestEvaluation(TestCase):
    """chebval and its 2d/3d point-wise and grid evaluation variants."""
    # coefficients of 1 + 2*x + 3*x**2, expressed in the Chebyshev basis
    c1d = np.array([2.5, 2., 1.5])
    c2d = np.einsum('i,j->ij', c1d, c1d)
    c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
    # some random values in [-1, 1)
    x = np.random.random((3, 5))*2 - 1
    # y holds the power-basis evaluation of the same polynomial at x
    y = polyval(x, [1., 2., 3.])
    def test_chebval(self) :
        #check empty input
        assert_equal(cheb.chebval([], [1]).size, 0)
        #check normal input against the power-basis T_i tables above
        x = np.linspace(-1,1)
        y = [polyval(x, c) for c in Tlist]
        for i in range(10) :
            msg = "At i=%d" % i
            tgt = y[i]
            res = cheb.chebval(x, [0]*i + [1])
            assert_almost_equal(res, tgt, err_msg=msg)
        #check that shape is preserved
        for i in range(3) :
            dims = [2]*i
            x = np.zeros(dims)
            assert_equal(cheb.chebval(x, [1]).shape, dims)
            assert_equal(cheb.chebval(x, [1,0]).shape, dims)
            assert_equal(cheb.chebval(x, [1,0,0]).shape, dims)
    def test_chebval2d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y
        #test exceptions
        assert_raises(ValueError, cheb.chebval2d, x1, x2[:2], self.c2d)
        #test values: the outer-product coefficients factor as y1*y2
        tgt = y1*y2
        res = cheb.chebval2d(x1, x2, self.c2d)
        assert_almost_equal(res, tgt)
        #test shape
        z = np.ones((2,3))
        res = cheb.chebval2d(z, z, self.c2d)
        assert_(res.shape == (2,3))
    def test_chebval3d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y
        #test exceptions
        assert_raises(ValueError, cheb.chebval3d, x1, x2, x3[:2], self.c3d)
        #test values
        tgt = y1*y2*y3
        res = cheb.chebval3d(x1, x2, x3, self.c3d)
        assert_almost_equal(res, tgt)
        #test shape
        z = np.ones((2,3))
        res = cheb.chebval3d(z, z, z, self.c3d)
        assert_(res.shape == (2,3))
    def test_chebgrid2d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y
        #test values: grid evaluation gives the outer product of the 1d values
        tgt = np.einsum('i,j->ij', y1, y2)
        res = cheb.chebgrid2d(x1, x2, self.c2d)
        assert_almost_equal(res, tgt)
        #test shape
        z = np.ones((2,3))
        res = cheb.chebgrid2d(z, z, self.c2d)
        assert_(res.shape == (2, 3)*2)
    def test_chebgrid3d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y
        #test values
        tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
        res = cheb.chebgrid3d(x1, x2, x3, self.c3d)
        assert_almost_equal(res, tgt)
        #test shape
        z = np.ones((2,3))
        res = cheb.chebgrid3d(z, z, z, self.c3d)
        assert_(res.shape == (2, 3)*3)
class TestIntegral(TestCase) :
    """cheb.chebint: exceptions, integration constants, lbnd, scl, axis."""
    def test_chebint(self) :
        # check exceptions
        assert_raises(ValueError, cheb.chebint, [0], .5)
        assert_raises(ValueError, cheb.chebint, [0], -1)
        assert_raises(ValueError, cheb.chebint, [0], 1, [0,0])
        # test integration of zero polynomial
        for i in range(2, 5):
            k = [0]*(i - 2) + [1]
            res = cheb.chebint([0], m=i, k=k)
            assert_almost_equal(res, [0, 1])
        # check single integration with integration constant
        for i in range(5) :
            scl = i + 1
            pol = [0]*i + [1]
            tgt = [i] + [0]*i + [1/scl]
            chebpol = cheb.poly2cheb(pol)
            chebint = cheb.chebint(chebpol, m=1, k=[i])
            res = cheb.cheb2poly(chebint)
            assert_almost_equal(trim(res), trim(tgt))
        # check single integration with integration constant and lbnd
        for i in range(5) :
            scl = i + 1
            pol = [0]*i + [1]
            chebpol = cheb.poly2cheb(pol)
            chebint = cheb.chebint(chebpol, m=1, k=[i], lbnd=-1)
            # the antiderivative must take the value of the constant at lbnd
            assert_almost_equal(cheb.chebval(-1, chebint), i)
        # check single integration with integration constant and scaling
        for i in range(5) :
            scl = i + 1
            pol = [0]*i + [1]
            tgt = [i] + [0]*i + [2/scl]
            chebpol = cheb.poly2cheb(pol)
            chebint = cheb.chebint(chebpol, m=1, k=[i], scl=2)
            res = cheb.cheb2poly(chebint)
            assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with default k
        for i in range(5) :
            for j in range(2,5) :
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j) :
                    tgt = cheb.chebint(tgt, m=1)
                res = cheb.chebint(pol, m=j)
                assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with defined k
        for i in range(5) :
            for j in range(2,5) :
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j) :
                    tgt = cheb.chebint(tgt, m=1, k=[k])
                res = cheb.chebint(pol, m=j, k=range(j))
                assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with lbnd
        for i in range(5) :
            for j in range(2,5) :
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j) :
                    tgt = cheb.chebint(tgt, m=1, k=[k], lbnd=-1)
                res = cheb.chebint(pol, m=j, k=range(j), lbnd=-1)
                assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with scaling
        for i in range(5) :
            for j in range(2,5) :
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j) :
                    tgt = cheb.chebint(tgt, m=1, k=[k], scl=2)
                res = cheb.chebint(pol, m=j, k=range(j), scl=2)
                assert_almost_equal(trim(res), trim(tgt))
    def test_chebint_axis(self):
        # check that axis keyword works
        c2d = np.random.random((3, 4))
        tgt = np.vstack([cheb.chebint(c) for c in c2d.T]).T
        res = cheb.chebint(c2d, axis=0)
        assert_almost_equal(res, tgt)
        tgt = np.vstack([cheb.chebint(c) for c in c2d])
        res = cheb.chebint(c2d, axis=1)
        assert_almost_equal(res, tgt)
        tgt = np.vstack([cheb.chebint(c, k=3) for c in c2d])
        res = cheb.chebint(c2d, k=3, axis=1)
        assert_almost_equal(res, tgt)
class TestDerivative(TestCase) :
    """cheb.chebder: exceptions, inverse of chebint, scaling, axis."""
    def test_chebder(self) :
        # check exceptions
        assert_raises(ValueError, cheb.chebder, [0], .5)
        assert_raises(ValueError, cheb.chebder, [0], -1)
        # check that zeroth derivative does nothing
        for i in range(5) :
            tgt = [0]*i + [1]
            res = cheb.chebder(tgt, m=0)
            assert_equal(trim(res), trim(tgt))
        # check that derivation is the inverse of integration
        for i in range(5) :
            for j in range(2,5) :
                tgt = [0]*i + [1]
                res = cheb.chebder(cheb.chebint(tgt, m=j), m=j)
                assert_almost_equal(trim(res), trim(tgt))
        # check derivation with scaling (scl=.5 undoes the scl=2 integral)
        for i in range(5) :
            for j in range(2,5) :
                tgt = [0]*i + [1]
                res = cheb.chebder(cheb.chebint(tgt, m=j, scl=2), m=j, scl=.5)
                assert_almost_equal(trim(res), trim(tgt))
    def test_chebder_axis(self):
        # check that axis keyword works
        c2d = np.random.random((3, 4))
        tgt = np.vstack([cheb.chebder(c) for c in c2d.T]).T
        res = cheb.chebder(c2d, axis=0)
        assert_almost_equal(res, tgt)
        tgt = np.vstack([cheb.chebder(c) for c in c2d])
        res = cheb.chebder(c2d, axis=1)
        assert_almost_equal(res, tgt)
class TestVander(TestCase):
    """Pseudo-Vandermonde matrices: chebvander and its 2d/3d variants."""
    # some random values in [-1, 1)
    x = np.random.random((3, 5))*2 - 1
    def test_chebvander(self) :
        # check for 1d x
        x = np.arange(3)
        v = cheb.chebvander(x, 3)
        assert_(v.shape == (3, 4))
        for i in range(4) :
            coef = [0]*i + [1]
            # column i of the Vandermonde matrix is T_i evaluated at x
            assert_almost_equal(v[..., i], cheb.chebval(x, coef))
        # check for 2d x
        x = np.array([[1, 2], [3, 4], [5, 6]])
        v = cheb.chebvander(x, 3)
        assert_(v.shape == (3, 2, 4))
        for i in range(4) :
            coef = [0]*i + [1]
            assert_almost_equal(v[..., i], cheb.chebval(x, coef))
    def test_chebvander2d(self) :
        # also tests chebval2d for non-square coefficient array
        x1, x2, x3 = self.x
        c = np.random.random((2, 3))
        van = cheb.chebvander2d(x1, x2, [1, 2])
        tgt = cheb.chebval2d(x1, x2, c)
        # the matrix-vector product with flattened c must equal direct
        # evaluation
        res = np.dot(van, c.flat)
        assert_almost_equal(res, tgt)
        # check shape
        van = cheb.chebvander2d([x1], [x2], [1, 2])
        assert_(van.shape == (1, 5, 6))
    def test_chebvander3d(self) :
        # also tests chebval3d for non-square coefficient array
        x1, x2, x3 = self.x
        c = np.random.random((2, 3, 4))
        van = cheb.chebvander3d(x1, x2, x3, [1, 2, 3])
        tgt = cheb.chebval3d(x1, x2, x3, c)
        res = np.dot(van, c.flat)
        assert_almost_equal(res, tgt)
        # check shape
        van = cheb.chebvander3d([x1], [x2], [x3], [1, 2, 3])
        assert_(van.shape == (1, 5, 24))
class TestFitting(TestCase):
    """Tests for cheb.chebfit."""

    def test_chebfit(self):
        def f(x):
            return x*(x - 1)*(x - 2)

        # Test exceptions
        assert_raises(ValueError, cheb.chebfit, [1], [1], -1)
        assert_raises(TypeError, cheb.chebfit, [[1]], [1], 0)
        assert_raises(TypeError, cheb.chebfit, [], [1], 0)
        assert_raises(TypeError, cheb.chebfit, [1], [[[1]]], 0)
        assert_raises(TypeError, cheb.chebfit, [1, 2], [1], 0)
        assert_raises(TypeError, cheb.chebfit, [1], [1, 2], 0)
        assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[[1]])
        assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[1, 1])

        # Test fit
        x = np.linspace(0, 2)
        y = f(x)
        # exact degree reproduces the cubic
        coef3 = cheb.chebfit(x, y, 3)
        assert_equal(len(coef3), 4)
        assert_almost_equal(cheb.chebval(x, coef3), y)
        # over-determined degree still reproduces the data
        coef4 = cheb.chebfit(x, y, 4)
        assert_equal(len(coef4), 5)
        assert_almost_equal(cheb.chebval(x, coef4), y)
        # 2-D y fits each column independently
        coef2d = cheb.chebfit(x, np.array([y, y]).T, 3)
        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)

        # test weighting: points with zero weight must not influence the
        # fit, so corrupt exactly those points of the weighted copy.
        w = np.zeros_like(x)
        yw = y.copy()
        w[1::2] = 1
        # BUG FIX: the original wrote ``y[0::2] = 0``, which corrupted the
        # clean data instead of the weighted copy and left ``yw`` pristine,
        # making the weighted-fit assertions trivially true.
        yw[0::2] = 0
        wcoef3 = cheb.chebfit(x, yw, 3, w=w)
        assert_almost_equal(wcoef3, coef3)
        #
        wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, 3, w=w)
        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
class TestGauss(TestCase):
    """Tests for cheb.chebgauss quadrature points and weights."""

    def test_100(self):
        x, w = cheb.chebgauss(100)

        # Orthogonality check.  Columns are normalized first because
        # fast-growing basis functions can otherwise produce huge values
        # that obscure the comparison.
        v = cheb.chebvander(x, 99)
        vv = np.dot(v.T * w, v)
        vd = 1/np.sqrt(vv.diagonal())
        vv = vd[:, None] * vv * vd
        assert_almost_equal(vv, np.eye(100))

        # For the Chebyshev weight function the integral of 1 is pi.
        assert_almost_equal(w.sum(), np.pi)
class TestMisc(TestCase) :
def test_chebfromroots(self) :
res = cheb.chebfromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1,5) :
roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
tgt = [0]*i + [1]
res = cheb.chebfromroots(roots)*2**(i-1)
assert_almost_equal(trim(res),trim(tgt))
def test_chebroots(self) :
assert_almost_equal(cheb.chebroots([1]), [])
assert_almost_equal(cheb.chebroots([1, 2]), [-.5])
for i in range(2,5) :
tgt = np.linspace(-1, 1, i)
res = cheb.chebroots(cheb.chebfromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_chebtrim(self) :
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, cheb.chebtrim, coef, -1)
# Test results
assert_equal(cheb.chebtrim(coef), coef[:-1])
assert_equal(cheb.chebtrim(coef, 1), coef[:-3])
assert_equal(cheb.chebtrim(coef, 2), [0])
def test_chebline(self) :
assert_equal(cheb.chebline(3,4), [3, 4])
def test_cheb2poly(self) :
for i in range(10) :
assert_almost_equal(cheb.cheb2poly([0]*i + [1]), Tlist[i])
def test_poly2cheb(self) :
for i in range(10) :
assert_almost_equal(cheb.poly2cheb(Tlist[i]), [0]*i + [1])
def test_weight(self):
x = np.linspace(-1, 1, 11)[1:-1]
tgt = 1./(np.sqrt(1 + x) * np.sqrt(1 - x))
res = cheb.chebweight(x)
assert_almost_equal(res, tgt)
def test_chebpts1(self):
#test exceptions
assert_raises(ValueError, cheb.chebpts1, 1.5)
assert_raises(ValueError, cheb.chebpts1, 0)
#test points
tgt = [0]
assert_almost_equal(cheb.chebpts1(1), tgt)
tgt = [-0.70710678118654746, 0.70710678118654746]
assert_almost_equal(cheb.chebpts1(2), tgt)
tgt = [-0.86602540378443871, 0, 0.86602540378443871]
assert_almost_equal(cheb.chebpts1(3), tgt)
tgt = [-0.9238795325, -0.3826834323, 0.3826834323, 0.9238795325]
assert_almost_equal(cheb.chebpts1(4), tgt)
def test_chebpts2(self):
#test exceptions
assert_raises(ValueError, cheb.chebpts2, 1.5)
assert_raises(ValueError, cheb.chebpts2, 1)
#test points
tgt = [-1, 1]
assert_almost_equal(cheb.chebpts2(2), tgt)
tgt = [-1, 0, 1]
assert_almost_equal(cheb.chebpts2(3), tgt)
tgt = [-1, -0.5, .5, 1]
assert_almost_equal(cheb.chebpts2(4), tgt)
tgt = [-1.0, -0.707106781187, 0, 0.707106781187, 1.0]
assert_almost_equal(cheb.chebpts2(5), tgt)
if __name__ == "__main__":
    # Allow the test module to be run directly as a script.
    run_module_suite()
| |
import numpy as np
import pytest
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import Series, Timestamp, date_range, isna
import pandas._testing as tm
def test_where_unsafe_int(sint_dtype):
    """Masked assignment of a same-length range keeps the signed int dtype."""
    ser = Series(np.arange(10), dtype=sint_dtype)
    ser[ser < 5] = range(2, 7)

    replaced = list(range(2, 7)) + list(range(5, 10))
    tm.assert_series_equal(ser, Series(replaced, dtype=sint_dtype))
def test_where_unsafe_float(float_dtype):
    """Masked assignment of a same-length range keeps the float dtype."""
    ser = Series(np.arange(10), dtype=float_dtype)
    ser[ser < 5] = range(2, 7)

    replaced = list(range(2, 7)) + list(range(5, 10))
    tm.assert_series_equal(ser, Series(replaced, dtype=float_dtype))
@pytest.mark.parametrize(
    "dtype,expected_dtype",
    [
        (np.int8, np.float64),
        (np.int16, np.float64),
        (np.int32, np.float64),
        (np.int64, np.float64),
        (np.float32, np.float32),
        (np.float64, np.float64),
    ],
)
def test_where_unsafe_upcast(dtype, expected_dtype):
    # see gh-9743
    # Assigning floats via a boolean mask upcasts integer series to
    # float64 but leaves float dtypes unchanged.
    ser = Series(np.arange(10), dtype=dtype)
    float_values = [2.5, 3.5, 4.5, 5.5, 6.5]
    expected = Series(float_values + list(range(5, 10)), dtype=expected_dtype)
    ser[ser < 5] = float_values
    tm.assert_series_equal(ser, expected)
def test_where_unsafe():
    # see gh-9731: float assignment upcasts int64 to float64
    ser = Series(np.arange(10), dtype="int64")
    float_values = [2.5, 3.5, 4.5, 5.5]
    selector = ser > 5
    expected = Series(list(range(6)) + float_values, dtype="float64")
    ser[selector] = float_values
    tm.assert_series_equal(ser, expected)

    # see gh-3235: same-length int assignment keeps int64
    ser = Series(np.arange(10), dtype="int64")
    ser[ser < 5] = range(2, 7)
    expected = Series(list(range(2, 7)) + list(range(5, 10)), dtype="int64")
    tm.assert_series_equal(ser, expected)
    assert ser.dtype == expected.dtype

    ser = Series(np.arange(10), dtype="int64")
    ser[ser > 5] = [0] * 4
    expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype="int64")
    tm.assert_series_equal(ser, expected)

    # length-mismatched mask assignment must raise
    ser = Series(np.arange(10))
    selector = ser > 5
    msg = "cannot assign mismatch length to masked array"
    with pytest.raises(ValueError, match=msg):
        ser[selector] = [5, 4, 3, 2, 1]
    with pytest.raises(ValueError, match=msg):
        ser[selector] = [0] * 5

    # dtype changes
    ser = Series([1, 2, 3, 4])
    result = ser.where(ser > 2, np.nan)
    tm.assert_series_equal(result, Series([np.nan, np.nan, 3, 4]))

    # GH 4667: setting with None changes dtype
    ser = Series(range(10)).astype(float)
    ser[8] = None
    assert isna(ser[8])

    ser = Series(range(10)).astype(float)
    ser[ser > 8] = None
    result = ser[isna(ser)]
    tm.assert_series_equal(result, Series(np.nan, index=[9]))
def test_where():
    ser = Series(np.random.randn(5))
    cond = ser > 0

    # dropping the NaNs from where() matches plain boolean indexing
    tm.assert_series_equal(ser.where(cond).dropna(), ser[cond])

    # with an "other", where() acts as a vectorised if/else
    tm.assert_series_equal(ser.where(cond, -ser), ser.abs())

    masked = ser.where(cond)
    assert ser.shape == masked.shape
    assert masked is not ser

    # test alignment of a shorter boolean Series condition
    cond = Series([True, False, False, True, False], index=ser.index)
    neg = -(ser.abs())

    expected = neg[cond].reindex(neg.index[:3]).reindex(neg.index)
    tm.assert_series_equal(neg.where(cond[:3]), expected)

    expected = neg.abs()
    expected.iloc[0] = neg[0]
    tm.assert_series_equal(neg.where(cond[:3], -neg), expected)
def test_where_error():
    ser = Series(np.random.randn(5))
    cond = ser > 0

    # a scalar or wrong-length array condition is rejected
    msg = "Array conditional must be same shape as self"
    with pytest.raises(ValueError, match=msg):
        ser.where(1)
    with pytest.raises(ValueError, match=msg):
        ser.where(cond[:3].values, -ser)

    # GH 2745: boolean-list setitem
    ser = Series([1, 2])
    ser[[True, False]] = [0, 1]
    tm.assert_series_equal(ser, Series([0, 2]))

    # failures
    msg = "cannot assign mismatch length to masked array"
    with pytest.raises(ValueError, match=msg):
        ser[[True, False]] = [0, 2, 3]

    msg = (
        "NumPy boolean array indexing assignment cannot assign 0 input "
        "values to the 1 output values where the mask is true"
    )
    with pytest.raises(ValueError, match=msg):
        ser[[True, False]] = []
@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
def test_where_array_like(klass):
    # see gh-15414: any boolean array-like works as the condition
    ser = Series([1, 2, 3])
    cond = klass([False, True, True])
    tm.assert_series_equal(ser.where(cond), Series([np.nan, 2, 3]))
@pytest.mark.parametrize(
    "cond",
    [
        [1, 0, 1],
        Series([2, 5, 7]),
        ["True", "False", "True"],
        [Timestamp("2017-01-01"), pd.NaT, Timestamp("2017-01-02")],
    ],
)
def test_where_invalid_input(cond):
    # see gh-15414: only boolean arrays accepted as conditions
    ser = Series([1, 2, 3])

    with pytest.raises(ValueError, match="Boolean array expected for the condition"):
        ser.where(cond)

    with pytest.raises(ValueError, match="Array conditional must be same shape as self"):
        ser.where([True])
def test_where_ndframe_align():
    msg = "Array conditional must be same shape as self"
    ser = Series([1, 2, 3])

    # too-short raw list raises, but the same values in a Series align
    cond = [True]
    with pytest.raises(ValueError, match=msg):
        ser.where(cond)
    out = ser.where(Series(cond))
    tm.assert_series_equal(out, Series([1, np.nan, np.nan]))

    # too-long ndarray raises, but a Series aligns (extra labels dropped)
    cond = np.array([False, True, False, True])
    with pytest.raises(ValueError, match=msg):
        ser.where(cond)
    out = ser.where(Series(cond))
    tm.assert_series_equal(out, Series([np.nan, 2, np.nan]))
def test_where_setitem_invalid():
    # GH 2702: make sure correct exceptions are raised on invalid list
    # assignment
    msg = "cannot set using a {} indexer with a different length than the value"

    # slice
    ser = Series(list("abc"))
    with pytest.raises(ValueError, match=msg.format("slice")):
        ser[0:3] = list(range(27))
    ser[0:3] = list(range(3))
    tm.assert_series_equal(ser.astype(np.int64), Series([0, 1, 2]))

    # slice with step
    ser = Series(list("abcdef"))
    with pytest.raises(ValueError, match=msg.format("slice")):
        ser[0:4:2] = list(range(27))
    ser = Series(list("abcdef"))
    ser[0:4:2] = list(range(2))
    tm.assert_series_equal(ser, Series([0, "b", 1, "d", "e", "f"]))

    # neg slices
    ser = Series(list("abcdef"))
    with pytest.raises(ValueError, match=msg.format("slice")):
        ser[:-1] = list(range(27))
    ser[-3:-1] = list(range(2))
    tm.assert_series_equal(ser, Series(["a", "b", "c", 0, 1, "f"]))

    # list indexers must match the value length exactly
    ser = Series(list("abc"))
    with pytest.raises(ValueError, match=msg.format("list-like")):
        ser[[0, 1, 2]] = list(range(27))
    ser = Series(list("abc"))
    with pytest.raises(ValueError, match=msg.format("list-like")):
        ser[[0, 1, 2]] = list(range(2))

    # a scalar indexer stores the whole list in a single cell
    ser = Series(list("abc"))
    ser[0] = list(range(10))
    tm.assert_series_equal(ser, Series([list(range(10)), "b", "c"]))
@pytest.mark.parametrize("size", range(2, 6))
@pytest.mark.parametrize(
    "mask", [[True, False, False, False, False], [True, False], [False]]
)
@pytest.mark.parametrize(
    # BUG FIX: ``np.float`` was a deprecated alias for the builtin ``float``
    # (deprecated in NumPy 1.20, removed in 1.24); ``np.float64`` is the
    # equivalent concrete type and works on all NumPy versions.
    "item", [2.0, np.nan, np.finfo(np.float64).max, np.finfo(np.float64).min]
)
# Test numpy arrays, lists and tuples as the input to be
# broadcast
@pytest.mark.parametrize(
    "box", [lambda x: np.array([x]), lambda x: [x], lambda x: (x,)]
)
def test_broadcast(size, mask, item, box):
    """Broadcast a single item into a masked selection via setitem, where
    and mask, and check all three agree."""
    # Tile the mask pattern up to ``size`` elements.
    selection = np.resize(mask, size)

    data = np.arange(size, dtype=float)

    # Construct the expected series by taking the source
    # data or item based on the selection
    expected = Series(
        [item if use_item else data[i] for i, use_item in enumerate(selection)]
    )

    s = Series(data)
    s[selection] = box(item)
    tm.assert_series_equal(s, expected)

    s = Series(data)
    result = s.where(~selection, box(item))
    tm.assert_series_equal(result, expected)

    s = Series(data)
    result = s.mask(selection, box(item))
    tm.assert_series_equal(result, expected)
def test_where_inplace():
    ser = Series(np.random.randn(5))
    cond = ser > 0

    # inplace where() must match the out-of-place result
    result = ser.copy()
    result.where(cond, inplace=True)
    tm.assert_series_equal(result.dropna(), ser[cond])
    tm.assert_series_equal(result, ser.where(cond))

    result = ser.copy()
    result.where(cond, -ser, inplace=True)
    tm.assert_series_equal(result, ser.where(cond, -ser))
def test_where_dups():
    # GH 4550: where() used to crash with duplicate index labels
    s1 = Series(list(range(3)))
    s2 = Series(list(range(3)))
    comb = pd.concat([s1, s2])

    result = comb.where(comb < 2)
    expected = Series([0, 1, np.nan, 0, 1, np.nan], index=[0, 1, 2, 0, 1, 2])
    tm.assert_series_equal(result, expected)

    # GH 4548: inplace boolean setitem with duplicate labels
    comb[comb < 1] = 5
    expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
    tm.assert_series_equal(comb, expected)

    comb[comb < 2] += 10
    expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
    tm.assert_series_equal(comb, expected)
def test_where_numeric_with_string():
    # GH 9280: string replacements force an object result, and replaced
    # slots must not be integers afterwards.
    ser = pd.Series([1, 2, 3])

    # scalar, list and ndarray replacements all behave identically
    for other in ("X", ["X", "Y", "Z"], np.array(["X", "Y", "Z"])):
        w = ser.where(ser > 1, other)

        assert not is_integer(w[0])
        assert is_integer(w[1])
        assert is_integer(w[2])
        assert isinstance(w[0], str)
        assert w.dtype == "object"
def test_where_timedelta_coerce():
    ser = Series([1, 2], dtype="timedelta64[ns]")
    mask = np.array([False, False])

    # an all-False mask replaces everything; int/float "others" coerce
    # the result away from timedelta
    expected = Series([10, 10])
    for other in ([10, 10], 10, 10.0, [10.0, 10.0]):
        tm.assert_series_equal(ser.where(mask, other), expected)

    # NaN among the replacements upcasts to object with None
    result = ser.where(mask, [10.0, np.nan])
    tm.assert_series_equal(result, Series([10, None], dtype="object"))
def test_where_datetime_conversion():
    ser = Series(date_range("20130102", periods=2))
    mask = np.array([False, False])

    # an all-False mask replaces everything; numeric "others" coerce the
    # result away from datetime
    expected = Series([10, 10])
    for other in ([10, 10], 10, 10.0, [10.0, 10.0]):
        tm.assert_series_equal(ser.where(mask, other), expected)

    # NaN among the replacements upcasts to object with None
    result = ser.where(mask, [10.0, np.nan])
    tm.assert_series_equal(result, Series([10, None], dtype="object"))

    # GH 15701: sub-second timestamps survive where()
    timestamps = ["2016-12-31 12:00:04+00:00", "2016-12-31 12:00:04.010000+00:00"]
    ser = Series([pd.Timestamp(stamp) for stamp in timestamps])
    result = ser.where(Series([False, True]))
    tm.assert_series_equal(result, Series([pd.NaT, ser[1]]))
def test_where_dt_tz_values(tz_naive_fixture):
    # Replacing values of a tz-aware Series with another tz-aware Series
    # must keep the timezone.
    ser1 = pd.Series(
        pd.DatetimeIndex(["20150101", "20150102", "20150103"], tz=tz_naive_fixture)
    )
    ser2 = pd.Series(
        pd.DatetimeIndex(["20160514", "20160515", "20160516"], tz=tz_naive_fixture)
    )
    keep_first_two = pd.Series([True, True, False])

    result = ser1.where(keep_first_two, ser2)

    exp = pd.Series(
        pd.DatetimeIndex(["20150101", "20150102", "20160516"], tz=tz_naive_fixture)
    )
    tm.assert_series_equal(exp, result)
def test_where_sparse():
    # GH#17198: where() on a sparse Series must not raise an
    # AttributeError for sp_index
    sparse_values = pd.arrays.SparseArray([1, 2])
    ser = pd.Series(sparse_values)
    expected = pd.Series(pd.arrays.SparseArray([0, 2]))
    tm.assert_series_equal(ser.where(ser >= 2, 0), expected)
| |
# -*- coding: utf-8 -*-
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.dispatch
~~~~~~~~~~~~~~
Actor that controls the top-level dispatch chains that dispatch to
per-endpoint chains.
"""
from collections import defaultdict
import logging
from calico.felix.actor import Actor, actor_message, wait_and_check
from calico.felix.frules import (
CHAIN_TO_ENDPOINT, CHAIN_FROM_ENDPOINT, CHAIN_FROM_LEAF, CHAIN_TO_LEAF,
chain_names, interface_to_suffix
)
_log = logging.getLogger(__name__)
class DispatchChains(Actor):
    """
    Actor that owns the felix-TO/FROM-ENDPOINT chains, which we use to
    dispatch to endpoint-specific chains.

    LocalEndpoint Actors give us kicks as they come and go so we can
    add/remove them from the chains.
    """
    def __init__(self, config, ip_version, iptables_updater):
        super(DispatchChains, self).__init__(qualifier="v%d" % ip_version)
        self.config = config
        self.ip_version = ip_version
        self.iptables_updater = iptables_updater
        # Interfaces that should currently be dispatched to.
        self.ifaces = set()
        # Leaf chains we last wrote to iptables; used to find stale ones.
        self.programmed_leaf_chains = set()
        # True when self.ifaces has changed since the chains were written.
        self._dirty = False

    @actor_message()
    def apply_snapshot(self, ifaces):
        """
        Replaces all known interfaces with the given snapshot and rewrites the
        chain.

        :param set[str] ifaces: The interface
        """
        _log.info("Applying dispatch chains snapshot.")
        self.ifaces = set(ifaces)  # Take a copy.
        # Always reprogram the chain, even if it's empty.  This makes sure
        # that we resync and it stops the iptables layer from marking our
        # chain as missing.
        self._dirty = True

    @actor_message()
    def on_endpoint_added(self, iface_name):
        """
        Message sent to us by the LocalEndpoint to tell us we should
        add it to the dispatch chain.

        Idempotent: does nothing if the mapping is already in the
        chain.

        :param iface_name: name of the linux interface.
        """
        _log.debug("%s ready: %s", self, iface_name)
        if iface_name in self.ifaces:
            return
        self.ifaces.add(iface_name)
        self._dirty = True

    @actor_message()
    def on_endpoint_removed(self, iface_name):
        """
        Removes the mapping for the given interface name.

        Idempotent: does nothing if there is no mapping.
        """
        _log.debug("%s asked to remove dispatch rule %s", self, iface_name)
        # It should be present but be defensive and reprogram the chain
        # just in case if not.
        try:
            self.ifaces.remove(iface_name)
        except KeyError:
            _log.warning(
                'Attempted to remove unmanaged interface %s', iface_name
            )
        else:
            self._dirty = True

    def _finish_msg_batch(self, batch, results):
        # Coalesce: rewrite the chains at most once per message batch.
        if self._dirty:
            _log.debug("Interface mapping changed, reprogramming chains.")
            self._reprogram_chains()
            self._dirty = False

    def _calculate_update(self, ifaces):
        """
        Calculates the iptables update to rewrite our chains.

        To avoid traversing lots of dispatch rules to find the right one,
        we build a tree of chains.  Currently, the tree can only be
        two layers deep: a root chain and a layer of leaves.

        Interface names look like this: "prefix1234abc".  The "prefix"
        part is always the same so we ignore it.  We call "1234abc", the
        "suffix".

        The root chain contains two sorts of rules:

        * where there are multiple interfaces whose suffixes start with
          the same character, it contains a rule that matches on
          that prefix of the "suffix"(!) and directs the packet to a
          leaf chain for that prefix.

        * as an optimization, if there is only one interface whose
          suffix starts with a given character, it contains a dispatch
          rule for that exact interface name.

        For example, if we have interface names "tapA1" "tapB1" "tapB2",
        we'll get (in pseudo code):

        Root chain:

        if interface=="tapA1" then goto chain for endpoint tapA1
        if interface.startswith("tapB") then goto leaf chain for prefix "tapB"

        tapB leaf chain:

        if interface=="tapB1" then goto chain for endpoint tapB1
        if interface=="tapB2" then goto chain for endpoint tapB2

        :param set[str] ifaces: The list of interfaces to generate a
            dispatch chain for.
        :returns Tuple: to_delete, deps, updates, new_leaf_chains:

            * set of leaf chains that are no longer needed for deletion
            * chain dependency dict.
            * chain updates dict.
            * complete set of leaf chains that are now required.
        """
        # iptables update fragments/dependencies for the root chains.
        updates = defaultdict(list)
        root_to_upds = updates[CHAIN_TO_ENDPOINT]
        root_from_upds = updates[CHAIN_FROM_ENDPOINT]
        dependencies = defaultdict(set)
        root_to_deps = dependencies[CHAIN_TO_ENDPOINT]
        root_from_deps = dependencies[CHAIN_FROM_ENDPOINT]

        # Separate the interface names by their prefixes so we can count
        # them and decide whether to program a leaf chain or not.
        interfaces_by_prefix = defaultdict(set)
        for iface in ifaces:
            ep_suffix = interface_to_suffix(self.config, iface)
            # Group by the first character of the per-endpoint suffix.
            prefix = ep_suffix[:1]
            interfaces_by_prefix[prefix].add(iface)

        # Spin through the interfaces by prefix.  Either add them directly
        # to the root chain or create a leaf and add them there.
        new_leaf_chains = set()
        for prefix, interfaces in interfaces_by_prefix.iteritems():
            use_root_chain = len(interfaces) == 1
            if use_root_chain:
                # Optimization: there's only one interface with this
                # prefix, don't program a leaf chain.
                disp_to_chain = CHAIN_TO_ENDPOINT
                disp_from_chain = CHAIN_FROM_ENDPOINT
                to_deps = root_to_deps
                from_deps = root_from_deps
                to_upds = root_to_upds
                from_upds = root_from_upds
            else:
                # There's more than one interface with this prefix, program
                # a leaf chain.
                disp_to_chain = CHAIN_TO_LEAF + "-" + prefix
                disp_from_chain = CHAIN_FROM_LEAF + "-" + prefix
                to_upds = updates[disp_to_chain]
                from_upds = updates[disp_from_chain]
                to_deps = dependencies[disp_to_chain]
                from_deps = dependencies[disp_from_chain]
                new_leaf_chains.add(disp_from_chain)
                new_leaf_chains.add(disp_to_chain)
                # Root chain depends on its leaves.
                root_from_deps.add(disp_to_chain)
                root_to_deps.add(disp_from_chain)
                # Point root chain at prefix chain.  The "+" suffix makes
                # iptables match on the interface-name prefix.
                iface_match = self.config.IFACE_PREFIX + prefix + "+"
                root_from_upds.append(
                    "--append %s --in-interface %s --goto %s" %
                    (CHAIN_FROM_ENDPOINT, iface_match, disp_from_chain)
                )
                root_to_upds.append(
                    "--append %s --out-interface %s --goto %s" %
                    (CHAIN_TO_ENDPOINT, iface_match, disp_to_chain)
                )
            # Common processing, add the per-endpoint rules to whichever
            # chain we decided above.
            for iface in interfaces:
                # Add rule to leaf or global chain to direct traffic to the
                # endpoint-specific one.  Note that we use --goto, which
                # means that the endpoint-specific chain will return to
                # our parent rather than to this chain.
                ep_suffix = interface_to_suffix(self.config, iface)
                to_chain_name, from_chain_name = chain_names(ep_suffix)
                from_upds.append("--append %s --in-interface %s --goto %s" %
                                 (disp_from_chain, iface, from_chain_name))
                from_deps.add(from_chain_name)
                to_upds.append("--append %s --out-interface %s --goto %s" %
                               (disp_to_chain, iface, to_chain_name))
                to_deps.add(to_chain_name)
            if not use_root_chain:
                # Add a default drop to the end of the leaf chain.
                from_upds.append("--append %s --jump DROP" % disp_from_chain)
                to_upds.append("--append %s --jump DROP" % disp_to_chain)

        # Both TO and FROM chains end with a DROP so that interfaces that
        # we don't know about yet can't bypass our rules.
        root_from_upds.append("--append %s --jump DROP" % CHAIN_FROM_ENDPOINT)
        root_to_upds.append("--append %s --jump DROP" % CHAIN_TO_ENDPOINT)

        # Any previously-programmed leaf that is no longer required gets
        # scheduled for deletion.
        chains_to_delete = self.programmed_leaf_chains - new_leaf_chains

        return chains_to_delete, dependencies, updates, new_leaf_chains

    def _reprogram_chains(self):
        """
        Recalculates the chains and writes them to iptables.

        Synchronous, doesn't return until the chain is in place.
        """
        _log.info("%s Updating dispatch chain, num entries: %s", self,
                  len(self.ifaces))
        update = self._calculate_update(self.ifaces)
        to_delete, deps, updates, new_leaf_chains = update
        # Kick off the rewrite and the stale-chain deletion in parallel,
        # then block until both complete.
        futures = [
            self.iptables_updater.rewrite_chains(updates, deps,
                                                 async=True),
            self.iptables_updater.delete_chains(to_delete,
                                                async=True),
        ]
        wait_and_check(futures)
        # Track our chains so we can clean them up.
        self.programmed_leaf_chains = new_leaf_chains

    def __str__(self):
        return (
            self.__class__.__name__ + "<ipv%s,entries=%s>" %
            (self.ip_version, len(self.ifaces))
        )
| |
#!/usr/bin/env python3
from io import StringIO
import pandas as pd
import traceback
import psycopg2
import boto3
import sys
import os
import re
import uuid
import logging
# Parameters accepted by S3.Client.put_object; see
# https://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.put_object
S3_ACCEPTED_KWARGS = [
    # BUG FIX: 'CacheControl' previously contained a trailing space
    # ('CacheControl '), so that kwarg could never match and was silently
    # dropped instead of being forwarded to put_object.
    'ACL', 'Body', 'CacheControl', 'ContentDisposition', 'ContentEncoding',
    'ContentLanguage', 'ContentLength', 'ContentMD5', 'ContentType',
    'Expires', 'GrantFullControl', 'GrantRead', 'GrantReadACP',
    'GrantWriteACP', 'Metadata', 'ServerSideEncryption', 'StorageClass',
    'WebsiteRedirectLocation', 'SSECustomerAlgorithm', 'SSECustomerKey',
    'SSECustomerKeyMD5', 'SSEKMSKeyId', 'RequestPayer', 'Tagging'
]

# Module-wide logging configuration, adjusted via set_log_level().
logging_config = {
    'logger_level': logging.INFO,
    'mask_secrets': True
}

# Library logger: silent by default, applications attach their own handlers.
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def set_log_level(level, mask_secrets=True):
    """Set the module logger's verbosity.

    Arguments:
    level str -- one of 'debug', 'info', 'warn'/'warning' or 'error'
    mask_secrets bool -- when True, AWS credentials are masked before any
        COPY statement is logged

    Raises:
    ValueError -- if ``level`` is not a recognised level name (the
        original raised a bare KeyError here).
    """
    log_level_map = {
        'debug': logging.DEBUG,
        'info': logging.INFO,
        'warn': logging.WARN,
        'warning': logging.WARNING,  # accepted alias for 'warn'
        'error': logging.ERROR
    }
    try:
        logging_config['logger_level'] = log_level_map[level]
    except KeyError:
        raise ValueError(
            "invalid log level {0!r}; expected one of {1}".format(
                level, sorted(log_level_map)))
    logger = logging.getLogger(__name__)
    logger.setLevel(logging_config['logger_level'])
    logging_config['mask_secrets'] = mask_secrets
def mask_aws_credentials(s):
    """Return ``s`` with AWS credentials replaced by asterisks.

    Masking only happens when ``logging_config['mask_secrets']`` is true;
    otherwise the string is returned unchanged.
    """
    if logging_config['mask_secrets']:
        # The credentials appear in the COPY statement as
        # ``access_key_id '<value>'`` / ``secret_access_key '<value>'``.
        # FIX: drop the redundant function-local ``import re`` -- ``re``
        # is already imported at module level.
        s = re.sub('(?<=access_key_id \')(.*)(?=\')', '*'*8, s)
        s = re.sub('(?<=secret_access_key \')(.*)(?=\')', '*'*8, s)
    return s
def connect_to_redshift(dbname, host, user, port=5439, **kwargs):
    """Open a psycopg2 connection to Redshift and cache it module-wide.

    Extra keyword arguments (e.g. ``password``) are forwarded verbatim to
    ``psycopg2.connect``.
    """
    global connect, cursor
    connect = psycopg2.connect(
        dbname=dbname, host=host, port=port, user=user, **kwargs)
    cursor = connect.cursor()
def connect_to_s3(aws_access_key_id, aws_secret_access_key, bucket, subdirectory=None, aws_iam_role=None, **kwargs):
    """Create a boto3 S3 resource and cache the connection details
    module-wide for later uploads and COPY statements."""
    global s3, s3_bucket_var, s3_subdirectory_var, aws_1, aws_2, aws_token, aws_role
    s3 = boto3.resource(
        's3',
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        **kwargs)
    s3_bucket_var = bucket
    # Normalise the subdirectory to either '' or 'subdir/'.
    s3_subdirectory_var = '' if subdirectory is None else subdirectory + '/'
    aws_1 = aws_access_key_id
    aws_2 = aws_secret_access_key
    aws_role = aws_iam_role
    # Empty string means "no session token" downstream.
    aws_token = kwargs.get('aws_session_token') or ''
def redshift_to_pandas(sql_query, query_params=None):
    """Run ``sql_query`` on the cached Redshift connection and return the
    result set as a pandas DataFrame."""
    cursor.execute(sql_query, query_params)

    column_names = [description[0] for description in cursor.description]
    return pd.DataFrame(cursor.fetchall(), columns=column_names)
def validate_column_names(data_frame):
"""Validate the column names to ensure no reserved words are used.
Arguments:
dataframe pd.data_frame -- data to validate
"""
rrwords = open(os.path.join(os.path.dirname(__file__),
'redshift_reserve_words.txt'), 'r').readlines()
rrwords = [r.strip().lower() for r in rrwords]
data_frame.columns = [x.lower() for x in data_frame.columns]
for col in data_frame.columns:
try:
assert col not in rrwords
except AssertionError:
raise ValueError(
'DataFrame column name {0} is a reserve word in redshift'
.format(col))
# check for spaces in the column names
there_are_spaces = sum(
[re.search('\s', x) is not None for x in data_frame.columns]) > 0
# delimit them if there are
if there_are_spaces:
col_names_dict = {x: '"{0}"'.format(x) for x in data_frame.columns}
data_frame.rename(columns=col_names_dict, inplace=True)
return data_frame
def df_to_s3(data_frame, csv_name, index, save_local, delimiter, verbose=True, **kwargs):
    """Write a dataframe to S3

    Arguments:
    data_frame pd.DataFrame -- data to upload
    csv_name str -- name of the file to upload
    index bool -- include the dataframe index as a column
    save_local bool -- save a local copy
    delimiter str -- delimiter for csv file
    """
    extra_kwargs = {key: value for key, value in kwargs.items()
                    if key in S3_ACCEPTED_KWARGS and value is not None}

    # Optionally keep a local copy of the exact CSV that is uploaded.
    if save_local:
        data_frame.to_csv(csv_name, index=index, sep=delimiter)
        if verbose:
            logger.info('saved file {0} in {1}'.format(csv_name, os.getcwd()))

    # Serialise in memory and push straight to the bucket.
    csv_buffer = StringIO()
    data_frame.to_csv(csv_buffer, index=index, sep=delimiter)
    s3.Bucket(s3_bucket_var).put_object(
        Key=s3_subdirectory_var + csv_name, Body=csv_buffer.getvalue(),
        **extra_kwargs)
    if verbose:
        logger.info('saved file {0} in bucket {1}'.format(
            csv_name, s3_subdirectory_var + csv_name))
def pd_dtype_to_redshift_dtype(dtype):
    """Map a pandas dtype name to the matching Redshift column type.

    Unrecognised dtypes fall back to VARCHAR(256).
    """
    # Ordered (prefix, type) rules; first match wins, mirroring the
    # original if/elif chain ('int64' must be tried before 'int').
    prefix_rules = (
        ('int64', 'BIGINT'),
        ('int', 'INTEGER'),
        ('float', 'REAL'),
        ('datetime', 'TIMESTAMP'),
    )
    for prefix, redshift_type in prefix_rules:
        if dtype.startswith(prefix):
            return redshift_type
    if dtype == 'bool':
        return 'BOOLEAN'
    return 'VARCHAR(256)'
def get_column_data_types(data_frame, index=False):
    """Return the Redshift column type for each dataframe column, with the
    index column's type first when ``index`` is True."""
    column_data_types = [pd_dtype_to_redshift_dtype(dtype.name)
                         for dtype in data_frame.dtypes.values]
    if index:
        index_type = pd_dtype_to_redshift_dtype(data_frame.index.dtype.name)
        column_data_types.insert(0, index_type)
    return column_data_types
def create_redshift_table(data_frame,
                          redshift_table_name,
                          column_data_types=None,
                          index=False,
                          append=False,
                          diststyle='even',
                          distkey='',
                          sort_interleaved=False,
                          sortkey='',
                          verbose=True):
    """Create an empty RedShift Table

    Drops any existing table of the same name, then creates a fresh one
    whose columns mirror ``data_frame`` (optionally preceded by the index
    column).  ``diststyle``/``distkey`` and ``sortkey`` map onto the
    corresponding Redshift DDL clauses.

    NOTE(review): ``append`` is accepted but unused here; the append
    decision is made by the caller (pandas_to_redshift).

    Raises:
    ValueError -- if ``diststyle`` is not 'even' or 'all' when no distkey
    is given.
    """
    # Build the column list, optionally led by the index column.
    if index:
        columns = list(data_frame.columns)
        if data_frame.index.name:
            columns.insert(0, data_frame.index.name)
        else:
            columns.insert(0, "index")
    else:
        columns = list(data_frame.columns)
    if column_data_types is None:
        column_data_types = get_column_data_types(data_frame, index)
    columns_and_data_type = ', '.join(
        ['{0} {1}'.format(x, y) for x, y in zip(columns, column_data_types)])

    create_table_query = 'create table {0} ({1})'.format(
        redshift_table_name, columns_and_data_type)
    if not distkey:
        # Without a distkey, we can set a diststyle
        if diststyle not in ['even', 'all']:
            raise ValueError("diststyle must be either 'even' or 'all'")
        else:
            create_table_query += ' diststyle {0}'.format(diststyle)
    else:
        # otherwise, override diststyle with distkey
        create_table_query += ' distkey({0})'.format(distkey)
    if len(sortkey) > 0:
        # An interleaved sort key gives equal weight to its columns.
        if sort_interleaved:
            create_table_query += ' interleaved'
        create_table_query += ' sortkey({0})'.format(sortkey)
    if verbose:
        logger.info(create_table_query)
        logger.info('CREATING A TABLE IN REDSHIFT')
    # Replace any existing table of the same name.
    cursor.execute('drop table if exists {0}'.format(redshift_table_name))
    cursor.execute(create_table_query)
    connect.commit()
def s3_to_redshift(redshift_table_name, csv_name, delimiter=',', quotechar='"',
                   dateformat='auto', timeformat='auto', region='', parameters='', verbose=True):
    """COPY a CSV staged in S3 into an existing Redshift table.

    Uses the credentials cached by connect_to_s3(); explicit access keys
    take precedence over an IAM role.  On failure the transaction is
    rolled back and the exception re-raised.
    """
    bucket_name = 's3://{0}/{1}'.format(
        s3_bucket_var, s3_subdirectory_var + csv_name)
    # Build the authorization clause: keys win over an IAM role; with
    # neither, no clause is emitted (cluster-default credentials).
    if aws_1 and aws_2:
        authorization = """
    access_key_id '{0}'
    secret_access_key '{1}'
    """.format(aws_1, aws_2)
    elif aws_role:
        authorization = """
    iam_role '{0}'
    """.format(aws_role)
    else:
        authorization = ""
    s3_to_sql = """
    copy {0}
    from '{1}'
    delimiter '{2}'
    ignoreheader 1
    csv quote as '{3}'
    dateformat '{4}'
    timeformat '{5}'
    {6}
    {7}
    """.format(redshift_table_name, bucket_name, delimiter, quotechar, dateformat,
               timeformat, authorization, parameters)
    if region:
        s3_to_sql = s3_to_sql + "region '{0}'".format(region)
    if aws_token != '':
        s3_to_sql = s3_to_sql + "\n\tsession_token '{0}'".format(aws_token)
    s3_to_sql = s3_to_sql + ';'
    if verbose:
        # Credentials are masked unless logging_config disables masking.
        logger.info(mask_aws_credentials(s3_to_sql))
    # send the file
    logger.info('FILLING THE TABLE IN REDSHIFT')
    try:
        cursor.execute(s3_to_sql)
        connect.commit()
    except Exception as e:
        logger.error(e)
        traceback.print_exc(file=sys.stdout)
        connect.rollback()
        raise
def pandas_to_redshift(data_frame,
                       redshift_table_name,
                       column_data_types=None,
                       index=False,
                       save_local=False,
                       delimiter=',',
                       quotechar='"',
                       dateformat='auto',
                       timeformat='auto',
                       region='',
                       append=False,
                       diststyle='even',
                       distkey='',
                       sort_interleaved=False,
                       sortkey='',
                       parameters='',
                       verbose=True,
                       **kwargs):
    """Upload a dataframe to Redshift by staging it as a CSV in S3.

    Flow: validate column names -> write CSV to S3 -> (unless ``append``)
    recreate the table -> COPY the CSV into it.  ``**kwargs`` matching S3
    put_object parameters are forwarded to the upload.
    """
    # Validate column names.
    data_frame = validate_column_names(data_frame)
    # Send data to S3; the UUID suffix keeps concurrent uploads from
    # clobbering each other.
    csv_name = '{}-{}.csv'.format(redshift_table_name, uuid.uuid4())
    s3_kwargs = {k: v for k, v in kwargs.items()
                 if k in S3_ACCEPTED_KWARGS and v is not None}
    df_to_s3(data_frame, csv_name, index, save_local, delimiter, verbose=verbose, **s3_kwargs)

    # CREATE AN EMPTY TABLE IN REDSHIFT
    if not append:
        create_redshift_table(data_frame, redshift_table_name,
                              column_data_types, index, append,
                              diststyle, distkey, sort_interleaved, sortkey, verbose=verbose)

    # CREATE THE COPY STATEMENT TO SEND FROM S3 TO THE TABLE IN REDSHIFT
    s3_to_redshift(redshift_table_name, csv_name, delimiter, quotechar,
                   dateformat, timeformat, region, parameters, verbose=verbose)
def exec_commit(sql_query):
    """Execute *sql_query* on the module-level connection and commit it.

    Relies on the ``connect`` and ``cursor`` globals created by the
    module's connection helper; call that first.
    """
    cursor.execute(sql_query)
    connect.commit()
def close_up_shop():
    """Close the Redshift connection and clear module-level state.

    Commits pending work, closes the cursor and connection, then deletes
    the connection and AWS-credential globals so they cannot be reused
    accidentally. Globals that were never created are ignored.
    """
    global connect, cursor, s3, s3_bucket_var, s3_subdirectory_var, aws_1, aws_2, aws_token
    cursor.close()
    connect.commit()
    connect.close()
    # Only a missing name is expected here; the original bare ``except:``
    # would also have swallowed KeyboardInterrupt/SystemExit.
    try:
        del connect, cursor
    except NameError:
        pass
    try:
        del s3, s3_bucket_var, s3_subdirectory_var, aws_1, aws_2, aws_token
    except NameError:
        pass
# -------------------------------------------------------------------------------
| |
from __future__ import absolute_import, division, print_function
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from warnings import warn
import types
from future.builtins import zip
from . import (UnrecognizedFormatError, ArgumentOverrideWarning,
DuplicateRegistrationError, UnprovenFormatWarning)
from .util import open_file, open_files, _is_string_or_bytes
from skbio.util import flatten
_formats = {}
_sniffers = {}
_aliases = {}
def _compound_format(fmts):
return ', '.join(fmts)
def _factor_format(fmt):
    """Split a compound-format string into its simple format names.

    A non-string input is assumed to already be a list of names and is
    returned unchanged.
    """
    if not _is_string_or_bytes(fmt):
        return fmt
    pieces = fmt.split(',')
    return [piece.strip() for piece in pieces]
def _format_len(fmt):
    """Return how many simple formats *fmt* is composed of."""
    factored = _factor_format(fmt)
    return len(factored)
def _normalize_format(fmt):
    """Return normalized format string, is_compound format."""
    # A plain string is lower-cased, factored into component names, sorted
    # and re-joined; it only counts as compound when the string itself
    # contained a comma.
    if _is_string_or_bytes(fmt):
        return _compound_format(sorted(
            _factor_format(fmt.lower()))), ',' in fmt
    else:
        # An iterable of format names is normalized element-wise and is
        # always treated as compound, even if it has a single element.
        return _compound_format(sorted([_normalize_format(f)[0] for f in
                                        fmt])), True
def _is_iter_list(x):
    """True for list-like iterables: not a string and not a filehandle."""
    if not hasattr(x, '__iter__'):
        return False
    if hasattr(x, 'read'):
        return False
    return not _is_string_or_bytes(x)
def _setup_kwargs(kws):
    """Merge per-format kwarg dicts into one dict of per-format lists.

    With a single dict it is returned as-is; otherwise each key maps to a
    list holding that key's value from every dict (None when absent).
    """
    if len(kws) == 1:
        return kws[0]
    merged = {}
    for key in flatten([k.keys() for k in kws]):
        merged[key] = [kw.get(key, None) for kw in kws]
    return merged
def _override_kwarg(kw, key, value, warn_user):
    """Set ``kw[key] = value``, warning when a differing guess is replaced."""
    should_warn = warn_user and key in kw and kw[key] != value
    if should_warn:
        message = 'Best guess was: %s=%s, continuing with user supplied: %s' % (
            key, str(kw[key]), str(value))
        warn(message, ArgumentOverrideWarning)
    kw[key] = value
def _override_kwargs(kw, fmt_kw, fmt_len, warn_user):
    """Overlay user kwargs *kw* onto sniffer kwargs *fmt_kw*.

    For compound formats (``fmt_len > 1``) a scalar user value is
    broadcast into a list with one entry per component format.
    """
    for key, value in kw.items():
        needs_broadcast = fmt_len > 1 and (
            not _is_iter_list(value) or len(value) != fmt_len)
        if needs_broadcast:
            value = [value] * fmt_len
        _override_kwarg(fmt_kw, key, value, warn_user)
    return fmt_kw
def register_sniffer(format):
    """Return a decorator for a sniffer function.
    A decorator factory for sniffer functions. Sniffers may only be registered
    to simple formats. Sniffers for compound formats are automatically
    generated from their component simple formats.
    A sniffer function should have at least the following signature:
    ``<format_name>_sniffer(fh)``. `fh` is **always** an open filehandle.
    This decorator provides the ability to use filepaths in the same argument
    position as `fh`. They will automatically be opened and closed.
    **The sniffer must not close the filehandle**, cleanup will be
    handled external to the sniffer and is not its concern.
    `**kwargs` are not passed to a sniffer, and a sniffer must not use them.
    The job of a sniffer is to determine if a file appears to be in the given
    format and to 'sniff' out any kwargs that would be of use to a reader
    function.
    The sniffer **must** return a tuple of (True, <kwargs dict>) if it believes
    `fh` is a given `format`. Otherwise it should return (False, {}).
    .. note:: Failure to adhere to the above interface specified for a sniffer
       will result in unintended side-effects.
    The sniffer may determine membership of a file in as many or as few
    lines of the file as it deems necessary.
    Parameters
    ----------
    format : str
        A format name which a decorated sniffer will be bound to.
    Returns
    -------
    function
        A decorator to be used on a sniffer. The decorator will raise a
        ``skbio.io.DuplicateRegistrationError`` if there already exists a
        *sniffer* bound to the `format`.
    See Also
    --------
    skbio.io.sniff
    """
    # Sniffers are keyed by the normalized (lower-cased, sorted) name;
    # compound formats are rejected because their sniffers are synthesized
    # in get_sniffer from the component sniffers.
    fmt, is_compound = _normalize_format(format)
    if is_compound:
        raise ValueError("'register_sniffer' cannot be applied to compound "
                         "formats.")
    def decorator(sniffer):
        if fmt in _sniffers:
            raise DuplicateRegistrationError(msg="'%s' already has a sniffer."
                                             % fmt)
        def wrapped_sniffer(fp, mode='U', **kwargs):
            # Sniff from the start of the file, then restore the caller's
            # position so sniffing has no observable side effect.
            with open_file(fp, mode) as fh:
                orig_pos = fh.tell()
                fh.seek(0)
                result = sniffer(fh, **kwargs)
                fh.seek(orig_pos)
                return result
        wrapped_sniffer.__doc__ = sniffer.__doc__
        wrapped_sniffer.__name__ = sniffer.__name__
        _sniffers[fmt] = wrapped_sniffer
        return wrapped_sniffer
    return decorator
def register_reader(format, cls=None):
    """Return a decorator for a reader function.
    A decorator factory for reader functions.
    A reader function should have at least the following signature:
    ``<format_name>_to_<class_name_or_generator>(fh)``. `fh` is **always** an
    open filehandle. This decorator provides the ability to use filepaths in
    the same argument position as `fh`. They will automatically be opened and
    closed.
    **The reader must not close the filehandle**, cleanup will be
    handled external to the reader and is not its concern. This is true even
    in the case of generators.
    Any additional `**kwargs` will be passed to the reader and may
    be used if necessary.
    In the event of a compound format (`['format1', 'format2']`) filehandles
    will be unrolled in the same order as the format and ALL kwarg arguments
    will be passed as tuples in the same order as the format. i.e.
    ``def format1_format2_to_generator(fmt1_fh, fmt2_fh, some_arg=(1, 2)):``
    The reader **must** return an instance of `cls` if `cls` is not None.
    Otherwise the reader must return a generator. The generator need not deal
    with closing the `fh`. That is already handled by this decorator.
    .. note:: Failure to adhere to the above interface specified for a reader
       will result in unintended side-effects.
    Parameters
    ----------
    format : str
        A format name which a decorated reader will be bound to.
    cls : type, optional
        The class which a decorated reader will be bound to. When `cls` is None
        the reader will be bound as returning a generator.
        Default is None.
    Returns
    -------
    function
        A decorator to be used on a reader. The decorator will raise a
        ``skbio.io.DuplicateRegistrationError`` if there already exists a
        *reader* bound to the same permutation of `fmt` and `cls`.
    See Also
    --------
    skbio.io.read
    """
    fmt, is_compound = _normalize_format(format)
    def decorator(reader):
        format_class = _formats.setdefault(fmt, {}).setdefault(cls, {})
        if 'reader' in format_class:
            raise DuplicateRegistrationError('reader', fmt, cls)
        if cls is None:
            # Generator case: the wrapper is itself a generator so the files
            # stay open (via the with-block) for the generator's lifetime.
            def wrapped_reader(fp, mode='U', mutate_fh=False, **kwargs):
                if not _is_iter_list(fp):
                    fp = [fp]
                with open_files(fp, mode) as fhs:
                    generator = reader(*fhs, **kwargs)
                    if mutate_fh or (not is_compound and
                                     _is_string_or_bytes(fp[0])):
                        # We opened the file ourselves (or the caller opted
                        # in to mutation), so no position juggling is needed.
                        while True:
                            yield next(generator)
                    else:
                        # Caller-supplied handles: swap between the reader's
                        # own read positions and the caller's positions on
                        # every yield, so the caller never observes movement.
                        orig_positions = [fh.tell() for fh in fhs]
                        read_positions = orig_positions
                        try:
                            while True:
                                orig_positions = [fh.tell() for fh in fhs]
                                for fh, pos in zip(fhs, read_positions):
                                    fh.seek(pos)
                                next_result = next(generator)
                                read_positions = [fh.tell() for fh in fhs]
                                for fh, pos in zip(fhs, orig_positions):
                                    fh.seek(pos)
                                yield next_result
                        finally:
                            # Restore the caller's positions even when the
                            # generator is closed or raises.
                            for fh, pos in zip(fhs, orig_positions):
                                fh.seek(pos)
        else:
            def wrapped_reader(fp, mode='U', mutate_fh=False, **kwargs):
                if not _is_iter_list(fp):
                    fp = [fp]
                with open_files(fp, mode) as fhs:
                    # Remember positions so we can rewind unless the caller
                    # explicitly allows mutation of the handles.
                    orig_positions = [fh.tell() for fh in fhs]
                    result = reader(*fhs, **kwargs)
                    if not mutate_fh:
                        for fh, pos in zip(fhs, orig_positions):
                            fh.seek(pos)
                    return result
        wrapped_reader.__doc__ = reader.__doc__
        wrapped_reader.__name__ = reader.__name__
        format_class['reader'] = wrapped_reader
        # Record the registration-time component order so get_reader can
        # remap argument order for equivalent compound formats.
        format_class['reader_args'] = _factor_format(format)
        return wrapped_reader
    return decorator
def register_writer(format, cls=None):
    """Return a decorator for a writer function.
    A decorator factory for writer functions.
    A writer function should have at least the following signature:
    ``<class_name_or_generator>_to_<format_name>(obj, fh)``. `fh` is **always**
    an open filehandle. This decorator provides the ability to use filepaths in
    the same argument position as `fh`. They will automatically be opened and
    closed.
    **The writer must not close the filehandle**, cleanup will be
    handled external to the writer and is not its concern.
    Any additional `**kwargs` will be passed to the writer and may be used if
    necessary.
    The writer must not return a value. Instead it should only mutate the `fh`
    in a way consistent with it's purpose.
    In the event of a compound format (`['format1', 'format2']`) filehandles
    will be unrolled in the same order as the format and ALL kwarg arguments
    will be passed as tuples in the same order as the format. i.e.
    ``def gen_to_format1_format2(gen, fmt1_fh, fmt2_fh, some_arg=(1, 2)):``
    If the writer accepts a generator, it should exhaust the generator to
    ensure that the potentially open filehandle backing said generator is
    closed.
    .. note:: Failure to adhere to the above interface specified for a writer
       will result in unintended side-effects.
    Parameters
    ----------
    format : str
        A format name which a decorated writer will be bound to.
    cls : type, optional
        The class which a decorated writer will be bound to. If `cls` is None
        the writer will be bound as expecting a generator.
        Default is None.
    Returns
    -------
    function
        A decorator to be used on a writer. The decorator will raise a
        ``skbio.io.DuplicateRegistrationError`` if there already exists a
        *writer* bound to the same permutation of `fmt` and `cls`.
    See Also
    --------
    skbio.io.write
    skbio.io.get_writer
    """
    fmt, is_compound = _normalize_format(format)
    def decorator(writer):
        format_class = _formats.setdefault(fmt, {}).setdefault(cls, {})
        if 'writer' in format_class:
            raise DuplicateRegistrationError('writer', fmt, cls)
        def wrapped_writer(obj, fp, mode='w', **kwargs):
            # Accept a single filepath/handle or a list of them; open_files
            # handles opening paths and closing only what it opened.
            if not _is_iter_list(fp):
                fp = [fp]
            with open_files(fp, mode) as fhs:
                writer(obj, *fhs, **kwargs)
        wrapped_writer.__doc__ = writer.__doc__
        wrapped_writer.__name__ = writer.__name__
        format_class['writer'] = wrapped_writer
        # Record the registration-time component order so get_writer can
        # remap argument order for equivalent compound formats.
        format_class['writer_args'] = _factor_format(format)
        return wrapped_writer
    return decorator
def list_read_formats(cls):
    """Return a list of available read formats for a given `cls` type.
    Parameters
    ----------
    cls : type
        The class which will be used to determine what read formats exist for
        an instance of `cls`.
    Returns
    -------
    list
        A list of available read formats for an instance of `cls`. List may be
        empty.
    See Also
    --------
    skbio.io.register_reader
    """
    # Delegates to the shared reader/writer listing helper.
    return _rw_list_formats('reader', cls)
def list_write_formats(cls):
    """Return a list of available write formats for a given `cls` instance.
    Parameters
    ----------
    cls : type
        The class which will be used to determine what write formats exist for
        an instance of `cls`.
    Returns
    -------
    list
        A list of available write formats for an instance of `cls`. List may be
        empty.
    See Also
    --------
    skbio.io.register_writer
    """
    # Delegates to the shared reader/writer listing helper.
    return _rw_list_formats('writer', cls)
def _rw_list_formats(name, cls):
    """List compound-format strings with a *name* ('reader' or 'writer')
    registered for *cls*."""
    available = []
    for fmt, by_cls in _formats.items():
        entry = by_cls.get(cls)
        if entry is not None and name in entry:
            available.append(_compound_format(entry[name + '_args']))
    return available
def get_sniffer(format):
    """Return a sniffer for a format.
    Parameters
    ----------
    format : str
        A format string which has a registered sniffer.
    Returns
    -------
    function or None
        Returns a sniffer function if one exists for the given `fmt`.
        Otherwise it will return None.
    See Also
    --------
    skbio.io.register_sniffer
    """
    fmt, is_compound = _normalize_format(format)
    if not is_compound:
        if fmt in _sniffers:
            return _sniffers[fmt]
        return None
    else:
        # Compound formats have no registered sniffer; synthesize one from
        # the component sniffers. If any component lacks a sniffer, the
        # compound format cannot be sniffed at all.
        sniffers = []
        for f in _factor_format(format):
            sniffer = get_sniffer(f)
            if sniffer is None:
                return None
            sniffers.append(sniffer)
        # Note: the loop variable below intentionally rebinds `sniffer`
        # while pairing each file with its component sniffer.
        def sniffer(fp, mode='U'):
            kwargs = []
            if not _is_iter_list(fp):
                raise ValueError('Must supply a list of files.')
            if len(fp) != len(sniffers):
                raise ValueError('List length (%d) must be %d.'
                                 % (len(fp), len(sniffers)))
            for f, sniffer in zip(fp, sniffers):
                is_format, fmt_kwargs = sniffer(f, mode=mode)
                if not is_format:
                    # One component failing means the whole compound fails.
                    return False, {}
                kwargs.append(fmt_kwargs)
            return True, _setup_kwargs(kwargs)
        return sniffer
def get_reader(format, cls=None):
    """Return a reader for a format.
    Parameters
    ----------
    format : str or iterable of str
        A registered format string or compound format.
    cls : type, optional
        The class which the reader will return an instance of. If `cls` is
        None, the reader will return a generator.
        Default is None.
    Returns
    -------
    function or None
        Returns a reader function if one exists for a given `fmt` and `cls`.
        Otherwise it will return None.
    See Also
    --------
    skbio.io.register_reader
    """
    fmt, is_compound = _normalize_format(format)
    # `composition` preserves the caller's component order, which may differ
    # from the order the reader was registered with.
    composition = _factor_format(format)
    reader, original_format_order = _rw_getter('reader', fmt, cls)
    if reader is None:
        return None
    if not is_compound or original_format_order == composition:
        return reader
    # Time to generate a flip on the fly! :rimshot:
    # The caller's component order differs from registration order, so wrap
    # the reader to permute the filehandle list before delegating.
    def generated_reader(fp, **kwargs):
        if len(fp) != len(original_format_order):
            raise ValueError('List length (%d) must be %d.'
                             % (len(fp), len(original_format_order)))
        mapped_fp = [None for f in fp]
        for i, f in enumerate(original_format_order):
            mapped_fp[i] = fp[composition.index(f)]
        return reader(mapped_fp, **kwargs)
    generated_reader.__name__ = 'flip_of_' + reader.__name__
    return generated_reader
def get_writer(format, cls=None):
    """Return a writer for a format.
    Parameters
    ----------
    format : str or iterable of str
        A registered format string or compound format.
    cls : type, optional
        The class which the writer will expect an instance of. If `cls` is
        None, the writer will expect a generator that is identical to what
        is returned by ``get_reader(<some_format>, None)``.
        Default is None.
    Returns
    -------
    function or None
        Returns a writer function if one exists for a given `fmt` and `cls`.
        Otherwise it will return None.
    See Also
    --------
    skbio.io.register_writer
    skbio.io.get_reader
    """
    fmt, is_compound = _normalize_format(format)
    # `composition` preserves the caller's component order, which may differ
    # from the order the writer was registered with.
    composition = _factor_format(format)
    writer, original_format_order = _rw_getter('writer', fmt, cls)
    if writer is None:
        return None
    if not is_compound or original_format_order == composition:
        return writer
    # The caller's component order differs from registration order, so wrap
    # the writer to permute the filehandle list before delegating.
    def generated_writer(obj, fp, **kwargs):
        if len(fp) != len(original_format_order):
            raise ValueError('List length (%d) must be %d.'
                             % (len(fp), len(original_format_order)))
        mapped_fp = [None for f in fp]
        for i, f in enumerate(original_format_order):
            mapped_fp[i] = fp[composition.index(f)]
        return writer(obj, mapped_fp, **kwargs)
    generated_writer.__name__ = 'flip_of_' + writer.__name__
    return generated_writer
def _rw_getter(name, fmt, cls):
    """Look up the registered function and its component order.

    Returns ``(func, args)`` for *name* ('reader'/'writer') under *fmt*
    and *cls*, or ``(None, None)`` when no such registration exists.
    """
    try:
        entry = _formats[fmt][cls]
        return entry[name], entry[name + "_args"]
    except KeyError:
        return None, None
def sniff(fp, cls=None, mode='U'):
    """Attempt to guess the format of a file and return format str.
    Parameters
    ----------
    fp : filepath or filehandle
        The provided file to guess the format of. Filepaths are automatically
        closed; filehandles are the responsibility of the caller.
    cls : type, optional
        A provided class that restricts the search for the format. Only formats
        which have a registered reader or writer for the given `cls` will be
        tested.
        Default is None.
    Returns
    -------
    (str, kwargs)
        A format name and kwargs for the corresponding reader.
    Raises
    ------
    UnrecognizedFormatError
        This occurs when the format is not 'claimed' by any registered sniffer
        or when the format is ambiguous and has been 'claimed' by more than one
        sniffer.
    See Also
    --------
    skbio.io.register_sniffer
    """
    # Normalize a single file to a one-element list so multiple files
    # (compound formats) and single files share the same code path.
    if not _is_iter_list(fp):
        fp = [fp]
    factored_format = []
    kwargs = []
    for f in fp:
        # Try every registered sniffer against this file; exactly one must
        # claim it, otherwise the format is unknown or ambiguous.
        possibles = []
        for fmt in _sniffers:
            if cls is not None and (fmt not in _formats or
                                    cls not in _formats[fmt]):
                # Restrict the search to formats registered for `cls`.
                continue
            format_sniffer = _sniffers[fmt]
            is_format, fmt_kwargs = format_sniffer(f, mode=mode)
            if is_format:
                possibles.append(fmt)
                kwargs.append(fmt_kwargs)
        if not possibles:
            raise UnrecognizedFormatError("Cannot guess the format for %s."
                                          % str(f))
        if len(possibles) > 1:
            raise UnrecognizedFormatError("File format is ambiguous, may be"
                                          " one of %s." % str(possibles))
        factored_format.append(possibles[0])
    return _compound_format(factored_format), _setup_kwargs(kwargs)
def read(fp, format=None, into=None, verify=True, mode='U', **kwargs):
    """Read a supported skbio file format into an instance or a generator.
    This function is able to reference and execute all *registered* read
    operations in skbio.
    Parameters
    ----------
    fp : filepath, filehandle, or iterable of either
        The location to read the given `format` `into`. Filepaths are
        automatically closed when read; filehandles are the responsibility
        of the caller. In the case of a generator, a filepath will be closed
        when ``StopIteration`` is raised; filehandles are still the
        responsibility of the caller. If `format` is a compound format, then
        `fp` **must** be an iterable of the same length as the compound format.
    format : str or iterable of str, optional
        The format must be a format name with a reader for the given
        `into` class. In the case of compound formats, any order of the simple
        formats will work. If a `format` is not provided or is None, all
        registered sniffers for the provided `into` class will be evaluated to
        attempt to guess the format.
        Default is None.
    into : type, optional
        A class which has a registered reader for a given `format`. If `into`
        is not provided or is None, read will return a generator.
        Default is None.
    verify : bool, optional
        Whether or not to confirm the format of a file if `format` is provided.
        Will raise a ``skbio.io.UnprovenFormatWarning`` if the sniffer of
        `format` returns False.
        Default is True.
    mode : str, optional
        The read mode. This is passed to `open(fp, mode)` internally.
        Default is 'U'
    kwargs : dict, optional
        Will be passed directly to the appropriate reader.
    Returns
    -------
    object or generator
        If `into` is not None, an instance of the `into` class will be
        provided with internal state consistent with the provided file.
        If `into` is None, a generator will be returned.
    Raises
    ------
    ValueError
        Raised when `format` and `into` are both None.
    skbio.io.UnrecognizedFormatError
        Raised when a reader could not be found for a given `format` or the
        format could not be guessed.
    skbio.io.UnprovenFormatWarning
        Raised when `verify` is True and the sniffer of a `format` provided a
        kwarg value that did not match the user's kwarg value.
    See Also
    --------
    skbio.io.register_reader
    skbio.io.register_sniffer
    """
    if format is None and into is None:
        raise ValueError("`format` and `into` cannot both be None.")
    if format is None:
        # No format given: sniff it, and fold the sniffer's suggested kwargs
        # underneath the user's explicit ones.
        format, fmt_kwargs = sniff(fp, cls=into, mode=mode)
        kwargs = _override_kwargs(kwargs, fmt_kwargs, _format_len(format),
                                  verify)
    elif verify:
        # Format given: optionally double-check it with its sniffer.
        sniffer = get_sniffer(format)
        if sniffer is not None:
            is_format, fmt_kwargs = sniffer(fp)
            if not is_format:
                warn("%s could not be positively identified as %s file." %
                     (str(fp), format),
                     UnprovenFormatWarning)
            else:
                kwargs = _override_kwargs(kwargs, fmt_kwargs,
                                          _format_len(format), True)
    reader = get_reader(format, into)
    if reader is None:
        raise UnrecognizedFormatError("Cannot read %s into %s, no reader "
                                      "found." % (format, into.__name__
                                                  if into is not None
                                                  else 'generator'))
    return reader(fp, mode=mode, **kwargs)
def write(obj, format=None, into=None, mode='w', **kwargs):
    """Write a supported skbio file format from an instance or a generator.
    This function is able to reference and execute all *registered* write
    operations in skbio.
    Parameters
    ----------
    obj : object
        The object must have a registered writer for a provided `format`.
    format : str or iterable of str
        The format must be a registered format name with a writer for the given
        `obj`. In the case of compound formats, any order of the simple
        formats will work.
    into : filepath, filehandle or iterable of either
        The location to write the given `format` from `obj` into. Filepaths are
        automatically closed when written; filehandles are the responsibility
        of the caller. If `format` is a compound format, then `into` **must**
        be an iterable of the same length as the compound format.
    mode : str, optional
        The write mode. This is passed to `open(fp, mode)` internally.
        Default is 'w'.
    kwargs : dict, optional
        Will be passed directly to the appropriate writer.
    Raises
    ------
    ValueError
        Raised when `format` or `into` are None.
    skbio.io.UnrecognizedFormatError
        Raised when a writer could not be found for the given `format` and
        `obj`.
    See Also
    --------
    skbio.io.register_writer
    """
    if format is None:
        raise ValueError("Must specify a `format` to write out as.")
    if into is None:
        raise ValueError("Must provide a filepath or filehandle for `into`")
    # Generators use the writer registered under cls=None; anything else is
    # dispatched on its concrete class.
    cls = None
    if not isinstance(obj, types.GeneratorType):
        cls = obj.__class__
    writer = get_writer(format, cls)
    if writer is None:
        raise UnrecognizedFormatError("Cannot write %s into %s, no %s writer "
                                      "found." % (format, str(into),
                                                  'generator' if cls is None
                                                  else str(cls)))
    writer(obj, into, mode=mode, **kwargs)
@register_sniffer('<emptyfile>')
def empty_file_sniffer(fh):
    """Sniffer for the special '<emptyfile>' format.

    Matches files whose every line is blank or whitespace-only.
    """
    has_content = any(line.strip() for line in fh)
    return (False, {}) if has_content else (True, {})
| |
#!/usr/bin/python2.5
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Snapshot Build Bisect Tool
This script bisects a snapshot archive using binary search. It starts at
a bad revision (it will try to guess HEAD) and asks for a last known-good
revision. It will then binary search across this revision range by downloading,
unzipping, and opening Chromium for you. After testing the specific revision,
it will ask you whether it is good or bad before continuing the search.
"""
# Base URL to download snapshots from.
BUILD_BASE_URL = "http://build.chromium.org/buildbot/snapshots/"
# The type (platform) of the build archive. This is what's passed in to the
# '-a/--archive' option.
BUILD_ARCHIVE_TYPE = ''
# The selected archive to bisect.
BUILD_ARCHIVE_DIR = ''
# The location of the builds.
BUILD_ARCHIVE_URL = "/%d/"
# Name of the build archive.
BUILD_ZIP_NAME = ''
# Directory name inside the archive.
BUILD_DIR_NAME = ''
# Name of the executable.
BUILD_EXE_NAME = ''
# URL to the ViewVC commit page.
BUILD_VIEWVC_URL = "http://src.chromium.org/viewvc/chrome?view=rev&revision=%d"
# Changelogs URL
CHANGELOG_URL = "http://build.chromium.org/buildbot/" \
"perf/dashboard/ui/changelog.html?url=/trunk/src&range=%d:%d"
###############################################################################
import math
import optparse
import os
import pipes
import re
import shutil
import sys
import tempfile
import urllib
def SetArchiveVars(archive):
  """Set a bunch of global variables appropriate for the specified archive.

  Args:
    archive: one of 'mac', 'xp', 'linux', 'linux-64'.
  """
  global BUILD_ARCHIVE_TYPE
  global BUILD_ARCHIVE_DIR
  global BUILD_ZIP_NAME
  global BUILD_DIR_NAME
  global BUILD_EXE_NAME
  global BUILD_BASE_URL
  BUILD_ARCHIVE_TYPE = archive
  BUILD_ARCHIVE_DIR = 'chromium-rel-' + BUILD_ARCHIVE_TYPE
  if BUILD_ARCHIVE_TYPE in ('linux', 'linux-64'):
    BUILD_ZIP_NAME = 'chrome-linux.zip'
    BUILD_DIR_NAME = 'chrome-linux'
    BUILD_EXE_NAME = "chrome"
  # Bug fix: ('mac') and ('xp') are plain strings, not one-element tuples,
  # so `in` performed a substring check (e.g. 'a' in 'mac' is True). Use
  # real tuples so only exact archive names match.
  elif BUILD_ARCHIVE_TYPE in ('mac',):
    BUILD_ZIP_NAME = 'chrome-mac.zip'
    BUILD_DIR_NAME = 'chrome-mac'
    BUILD_EXE_NAME = "Chromium.app/Contents/MacOS/Chromium"
  elif BUILD_ARCHIVE_TYPE in ('xp',):
    BUILD_ZIP_NAME = 'chrome-win32.zip'
    BUILD_DIR_NAME = 'chrome-win32'
    BUILD_EXE_NAME = "chrome.exe"
  # Point the base URL at the per-platform snapshot directory.
  BUILD_BASE_URL += BUILD_ARCHIVE_DIR
def ParseDirectoryIndex(url):
  """Parses the HTML directory listing into a list of revision numbers."""
  # Python 2 urllib: fetch the buildbot directory-index HTML.
  handle = urllib.urlopen(url)
  dirindex = handle.read()
  handle.close()
  # Each revision appears as <a href="NNN/">NNN/</a>; the \1 backreference
  # requires the link text to match the href. Returns revision strings.
  return re.findall(r'<a href="([0-9]*)/">\1/</a>', dirindex)
def GetRevList(good, bad):
  """Gets the list of revision numbers in [good, bad) with builds available.

  Args:
    good: first revision to include (inclusive).
    bad: first revision to exclude (exclusive).

  Returns:
    A sorted list of ints.
  """
  # Download the main revlist.
  revlist = ParseDirectoryIndex(BUILD_BASE_URL)
  # Direct comparison replaces the original
  # range(good, bad).__contains__(...) test, which (on Python 2)
  # materialized the whole range and did an O(n) membership scan for
  # every revision.
  revlist = [int(r) for r in revlist if good <= int(r) < bad]
  revlist.sort()
  return revlist
def TryRevision(rev, profile, args):
  """Downloads revision |rev|, unzips it, and opens it for the user to test.
  |profile| is the profile to use."""
  # Do this in a temp dir so we don't collide with user files.
  cwd = os.getcwd()
  tempdir = tempfile.mkdtemp(prefix='bisect_tmp')
  os.chdir(tempdir)
  # Download the file.
  download_url = BUILD_BASE_URL + (BUILD_ARCHIVE_URL % rev) + BUILD_ZIP_NAME
  try:
    print 'Fetching ' + download_url
    urllib.urlretrieve(download_url, BUILD_ZIP_NAME)
  except Exception, e:
    print("Could not retrieve the download. Sorry.")
    sys.exit(-1)
  # Unzip the file.
  print 'Unzipping ...'
  os.system("unzip -q %s" % BUILD_ZIP_NAME)
  # Tell the system to open the app.
  args = ['--user-data-dir=%s' % profile] + args
  flags = ' '.join(map(pipes.quote, args))
  print 'Running %s/%s/%s %s' % (os.getcwd(), BUILD_DIR_NAME, BUILD_EXE_NAME,
                                 flags)
  # NOTE(review): ('xp') is a string, so this is a substring test rather
  # than tuple membership; both branches run the same command anyway.
  if BUILD_ARCHIVE_TYPE in ('linux', 'linux-64', 'mac'):
    os.system("%s/%s %s" % (BUILD_DIR_NAME, BUILD_EXE_NAME, flags))
  elif BUILD_ARCHIVE_TYPE in ('xp'):
    # TODO(mmoss) Does Windows need 'start' or something?
    os.system("%s/%s %s" % (BUILD_DIR_NAME, BUILD_EXE_NAME, flags))
  os.chdir(cwd)
  print 'Cleaning temp dir ...'
  # Best-effort cleanup; ignore_errors-style second argument plus a broad
  # except keeps bisecting even if the tree is busy.
  try:
    shutil.rmtree(tempdir, True)
  except Exception, e:
    pass
def AskIsGoodBuild(rev):
  """Annoyingly ask the user whether build |rev| is good or bad."""
  while True:
    # Only the first character of the answer matters; re-prompt until it
    # is 'g' or 'b'.
    check = raw_input("\nBuild %d is [(g)ood/(b)ad]: " % int(rev))[0]
    if (check == "g" or check == "b"):
      return (check == "g")
    else:
      print("Just answer the question...")
def main():
  """Parse options, then binary-search the snapshot builds interactively."""
  usage = ('%prog [options] [-- chromium-options]\n'
           'Perform binary search on the snapshot builds.')
  parser = optparse.OptionParser(usage=usage)
  # Strangely, the default help output doesn't include the choice list.
  choices = ['mac', 'xp', 'linux', 'linux-64']
  parser.add_option('-a', '--archive',
                    choices = choices,
                    help = 'The buildbot archive to bisect [%s].' %
                           '|'.join(choices))
  parser.add_option('-b', '--bad', type = 'int',
                    help = 'The bad revision to bisect to.')
  parser.add_option('-g', '--good', type = 'int',
                    help = 'The last known good revision to bisect from.')
  parser.add_option('-p', '--profile', '--user-data-dir', type = 'str',
                    help = 'Profile to use; this will not reset every run. ' +
                    'Defaults to a clean profile.')
  (opts, args) = parser.parse_args()
  if opts.archive is None:
    parser.print_help()
    return 1
  if opts.bad and opts.good and (opts.good > opts.bad):
    print ('The good revision (%d) must precede the bad revision (%d).\n' %
           (opts.good, opts.bad))
    parser.print_help()
    return 1
  SetArchiveVars(opts.archive)
  # Pick a starting point, try to get HEAD for this.
  if opts.bad:
    bad_rev = opts.bad
  else:
    bad_rev = 0
    try:
      # Location of the latest build revision number
      BUILD_LATEST_URL = "%s/LATEST" % (BUILD_BASE_URL)
      nh = urllib.urlopen(BUILD_LATEST_URL)
      latest = int(nh.read())
      nh.close()
      bad_rev = raw_input("Bad revision [HEAD:%d]: " % latest)
      if (bad_rev == ""):
        bad_rev = latest
      bad_rev = int(bad_rev)
    except Exception, e:
      print("Could not determine latest revision. This could be bad...")
      bad_rev = int(raw_input("Bad revision: "))
  # Find out when we were good.
  if opts.good:
    good_rev = opts.good
  else:
    good_rev = 0
    try:
      good_rev = int(raw_input("Last known good [0]: "))
    except Exception, e:
      pass
  # Get a list of revisions to bisect across.
  revlist = GetRevList(good_rev, bad_rev)
  if len(revlist) < 2: # Don't have enough builds to bisect
    print "We don't have enough builds to bisect. revlist: %s" % revlist
    sys.exit(1)
  # If we don't have a |good_rev|, set it to be the first revision possible.
  if good_rev == 0:
    good_rev = revlist[0]
  # These are indexes of |revlist|.
  good = 0
  bad = len(revlist) - 1
  last_known_good_rev = revlist[good]
  # Binary search time!
  while good < bad:
    candidates = revlist[good:bad]
    num_poss = len(candidates)
    if num_poss > 10:
      print("%d candidates. %d tries left." %
            (num_poss, round(math.log(num_poss, 2))))
    else:
      print("Candidates: %s" % revlist[good:bad])
    # Cut the problem in half...
    test = int((bad - good) / 2) + good
    test_rev = revlist[test]
    # Let the user give this rev a spin (in her own profile, if she wants).
    profile = opts.profile
    if not profile:
      profile = 'profile' # In a temp dir.
    TryRevision(test_rev, profile, args)
    if AskIsGoodBuild(test_rev):
      # NOTE(review): this records revlist[good] (the previous lower
      # bound), not the just-confirmed-good test_rev — looks like it
      # should be revlist[test]; confirm before relying on the reported
      # changelog lower bound.
      last_known_good_rev = revlist[good]
      good = test + 1
    else:
      bad = test
  # We're done. Let the user know the results in an official manner.
  print("You are probably looking for build %d." % revlist[bad])
  print("CHANGELOG URL:")
  print(CHANGELOG_URL % (last_known_good_rev, revlist[bad]))
  print("Built at revision:")
  print(BUILD_VIEWVC_URL % revlist[bad])
if __name__ == '__main__':
sys.exit(main())
| |
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import mox
import webob
from nova.api.openstack.compute import servers
from nova.compute import vm_states
import nova.db
from nova import exception
from nova import flags
from nova import test
from nova.tests.api.openstack import fakes
from nova import utils
FLAGS = flags.FLAGS
FAKE_UUID = fakes.FAKE_UUID
def return_server_not_found(context, uuid):
    # Stub for nova.db.instance_get_by_uuid that simulates a missing
    # instance regardless of the uuid requested.
    raise exception.NotFound()
def instance_update(context, instance_id, kwargs):
    # Stub for nova.db.instance_update: ignores the updates (note `kwargs`
    # is a plain positional dict here, not **kwargs) and returns a fake
    # instance record.
    return fakes.stub_instance(instance_id)
class MockSetAdminPassword(object):
    """Callable stand-in for the compute API's set_admin_password.

    Records the uuid of the instance and the password it was invoked with
    so tests can assert on them afterwards.
    """
    def __init__(self):
        self.password = None
        self.instance_id = None

    def __call__(self, context, instance, password):
        self.password = password
        self.instance_id = instance['uuid']
class ServerActionsControllerTest(test.TestCase):
    def setUp(self):
        """Stub out auth, db, glance, network and compute so controller
        actions can run without real services."""
        super(ServerActionsControllerTest, self).setUp()
        fakes.stub_out_auth(self.stubs)
        # Every lookup returns an ACTIVE instance on a fixed host.
        self.stubs.Set(nova.db, 'instance_get_by_uuid',
                       fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
                                               host='fake_host'))
        self.stubs.Set(nova.db, 'instance_update', instance_update)
        fakes.stub_out_glance(self.stubs)
        fakes.stub_out_nw_api(self.stubs)
        fakes.stub_out_rate_limiting(self.stubs)
        fakes.stub_out_compute_api_snapshot(self.stubs)
        fakes.stub_out_image_service(self.stubs)
        service_class = 'nova.image.glance.GlanceImageService'
        self.service = utils.import_object(service_class)
        self.service.delete_all()
        # Captures images pushed to glance so tests can inspect them.
        self.sent_to_glance = {}
        fakes.stub_out_glance_add_image(self.stubs, self.sent_to_glance)
        self.flags(allow_instance_snapshots=True,
                   enable_instance_password=True)
        self.uuid = FAKE_UUID
        self.url = '/v2/fake/servers/%s/action' % self.uuid
        self._image_href = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
        self.controller = servers.Controller()
    def test_server_change_password(self):
        # changePassword should forward the instance uuid and the new
        # password to the compute API's set_admin_password.
        mock_method = MockSetAdminPassword()
        self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method)
        body = {'changePassword': {'adminPass': '1234pass'}}
        req = fakes.HTTPRequest.blank(self.url)
        self.controller._action_change_password(req, FAKE_UUID, body)
        self.assertEqual(mock_method.instance_id, self.uuid)
        self.assertEqual(mock_method.password, '1234pass')
    def test_server_change_password_pass_disabled(self):
        # run with enable_instance_password disabled to verify adminPass
        # is missing from response. See lp bug 921814
        self.flags(enable_instance_password=False)
        mock_method = MockSetAdminPassword()
        self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method)
        body = {'changePassword': {'adminPass': '1234pass'}}
        req = fakes.HTTPRequest.blank(self.url)
        self.controller._action_change_password(req, FAKE_UUID, body)
        self.assertEqual(mock_method.instance_id, self.uuid)
        # note,the mock still contains the password: the flag only hides
        # it from the API response, it does not block the compute call.
        self.assertEqual(mock_method.password, '1234pass')
def test_server_change_password_not_a_string(self):
body = {'changePassword': {'adminPass': 1234}}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_change_password,
req, FAKE_UUID, body)
def test_server_change_password_bad_request(self):
body = {'changePassword': {'pass': '12345'}}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_change_password,
req, FAKE_UUID, body)
def test_server_change_password_empty_string(self):
body = {'changePassword': {'adminPass': ''}}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_change_password,
req, FAKE_UUID, body)
def test_server_change_password_none(self):
body = {'changePassword': {'adminPass': None}}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_change_password,
req, FAKE_UUID, body)
def test_reboot_hard(self):
body = dict(reboot=dict(type="HARD"))
req = fakes.HTTPRequest.blank(self.url)
self.controller._action_reboot(req, FAKE_UUID, body)
def test_reboot_soft(self):
body = dict(reboot=dict(type="SOFT"))
req = fakes.HTTPRequest.blank(self.url)
self.controller._action_reboot(req, FAKE_UUID, body)
def test_reboot_incorrect_type(self):
body = dict(reboot=dict(type="NOT_A_TYPE"))
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_reboot,
req, FAKE_UUID, body)
def test_reboot_missing_type(self):
body = dict(reboot=dict())
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_reboot,
req, FAKE_UUID, body)
def test_reboot_not_found(self):
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_not_found)
body = dict(reboot=dict(type="HARD"))
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_reboot,
req, str(utils.gen_uuid()), body)
def test_reboot_raises_conflict_on_invalid_state(self):
body = dict(reboot=dict(type="HARD"))
def fake_reboot(*args, **kwargs):
raise exception.InstanceInvalidState
self.stubs.Set(nova.compute.api.API, 'reboot', fake_reboot)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_reboot,
req, FAKE_UUID, body)
def test_rebuild_accepted_minimum(self):
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(nova.db, 'instance_get_by_uuid', return_server)
self_href = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
req = fakes.HTTPRequest.blank(self.url)
robj = self.controller._action_rebuild(req, FAKE_UUID, body)
body = robj.obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertEqual(len(body['server']['adminPass']),
FLAGS.password_length)
self.assertEqual(robj['location'], self_href)
def test_rebuild_instance_with_image_uuid(self):
info = dict(image_href_in_call=None)
def rebuild(self2, context, instance, image_href, *args, **kwargs):
info['image_href_in_call'] = image_href
self.stubs.Set(nova.db, 'instance_get',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
self.stubs.Set(nova.compute.API, 'rebuild', rebuild)
# proper local hrefs must start with 'http://localhost/v2/'
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
image_href = 'http://localhost/v2/fake/images/%s' % image_uuid
body = {
'rebuild': {
'imageRef': image_uuid,
},
}
req = fakes.HTTPRequest.blank('/v2/fake/servers/a/action')
self.controller._action_rebuild(req, FAKE_UUID, body)
self.assertEqual(info['image_href_in_call'], image_uuid)
def test_rebuild_instance_with_image_href_uses_uuid(self):
info = dict(image_href_in_call=None)
def rebuild(self2, context, instance, image_href, *args, **kwargs):
info['image_href_in_call'] = image_href
self.stubs.Set(nova.db, 'instance_get',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
self.stubs.Set(nova.compute.API, 'rebuild', rebuild)
# proper local hrefs must start with 'http://localhost/v2/'
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
image_href = 'http://localhost/v2/fake/images/%s' % image_uuid
body = {
'rebuild': {
'imageRef': image_href,
},
}
req = fakes.HTTPRequest.blank('/v2/fake/servers/a/action')
self.controller._action_rebuild(req, FAKE_UUID, body)
self.assertEqual(info['image_href_in_call'], image_uuid)
def test_rebuild_accepted_minimum_pass_disabled(self):
# run with enable_instance_password disabled to verify adminPass
# is missing from response. See lp bug 921814
self.flags(enable_instance_password=False)
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(nova.db, 'instance_get_by_uuid', return_server)
self_href = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
req = fakes.HTTPRequest.blank(self.url)
robj = self.controller._action_rebuild(req, FAKE_UUID, body)
body = robj.obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertTrue("adminPass" not in body['server'])
self.assertEqual(robj['location'], self_href)
def test_rebuild_raises_conflict_on_invalid_state(self):
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
def fake_rebuild(*args, **kwargs):
raise exception.InstanceInvalidState
self.stubs.Set(nova.compute.api.API, 'rebuild', fake_rebuild)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_rebuild,
req, FAKE_UUID, body)
def test_rebuild_accepted_with_metadata(self):
metadata = {'new': 'metadata'}
return_server = fakes.fake_instance_get(metadata=metadata,
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(nova.db, 'instance_get_by_uuid', return_server)
body = {
"rebuild": {
"imageRef": self._image_href,
"metadata": metadata,
},
}
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
self.assertEqual(body['server']['metadata'], metadata)
def test_rebuild_accepted_with_bad_metadata(self):
body = {
"rebuild": {
"imageRef": self._image_href,
"metadata": "stack",
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
req, FAKE_UUID, body)
def test_rebuild_bad_entity(self):
body = {
"rebuild": {
"imageId": self._image_href,
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
req, FAKE_UUID, body)
def test_rebuild_bad_personality(self):
body = {
"rebuild": {
"imageRef": self._image_href,
"personality": [{
"path": "/path/to/file",
"contents": "INVALID b64",
}]
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
req, FAKE_UUID, body)
def test_rebuild_personality(self):
body = {
"rebuild": {
"imageRef": self._image_href,
"personality": [{
"path": "/path/to/file",
"contents": base64.b64encode("Test String"),
}]
},
}
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
self.assertTrue('personality' not in body['server'])
def test_rebuild_admin_pass(self):
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(nova.db, 'instance_get_by_uuid', return_server)
body = {
"rebuild": {
"imageRef": self._image_href,
"adminPass": "asdf",
},
}
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertEqual(body['server']['adminPass'], 'asdf')
def test_rebuild_admin_pass_pass_disabled(self):
# run with enable_instance_password disabled to verify adminPass
# is missing from response. See lp bug 921814
self.flags(enable_instance_password=False)
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(nova.db, 'instance_get_by_uuid', return_server)
body = {
"rebuild": {
"imageRef": self._image_href,
"adminPass": "asdf",
},
}
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertTrue('adminPass' not in body['server'])
def test_rebuild_server_not_found(self):
def server_not_found(self, instance_id):
raise exception.InstanceNotFound(instance_id=instance_id)
self.stubs.Set(nova.db, 'instance_get_by_uuid', server_not_found)
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_rebuild,
req, FAKE_UUID, body)
def test_rebuild_with_bad_image(self):
body = {
"rebuild": {
"imageRef": "foo",
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
req, FAKE_UUID, body)
def test_rebuild_accessIP(self):
attributes = {
'access_ip_v4': '172.19.0.1',
'access_ip_v6': 'fe80::1',
}
body = {
"rebuild": {
"imageRef": self._image_href,
"accessIPv4": "172.19.0.1",
"accessIPv6": "fe80::1",
},
}
update = self.mox.CreateMockAnything()
self.stubs.Set(nova.compute.API, 'update', update)
req = fakes.HTTPRequest.blank(self.url)
context = req.environ['nova.context']
update(context, mox.IgnoreArg(),
image_ref=self._image_href,
vm_state=vm_states.REBUILDING,
task_state=None, progress=0, **attributes).AndReturn(None)
self.mox.ReplayAll()
self.controller._action_rebuild(req, FAKE_UUID, body)
def test_resize_server(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
self.resize_called = False
def resize_mock(*args):
self.resize_called = True
self.stubs.Set(nova.compute.api.API, 'resize', resize_mock)
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_resize(req, FAKE_UUID, body)
self.assertEqual(self.resize_called, True)
def test_resize_server_no_flavor(self):
body = dict(resize=dict())
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_resize,
req, FAKE_UUID, body)
def test_resize_server_no_flavor_ref(self):
body = dict(resize=dict(flavorRef=None))
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_resize,
req, FAKE_UUID, body)
def test_resize_raises_conflict_on_invalid_state(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
def fake_resize(*args, **kwargs):
raise exception.InstanceInvalidState
self.stubs.Set(nova.compute.api.API, 'resize', fake_resize)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_resize,
req, FAKE_UUID, body)
def test_confirm_resize_server(self):
body = dict(confirmResize=None)
self.confirm_resize_called = False
def cr_mock(*args):
self.confirm_resize_called = True
self.stubs.Set(nova.compute.api.API, 'confirm_resize', cr_mock)
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_confirm_resize(req, FAKE_UUID, body)
self.assertEqual(self.confirm_resize_called, True)
def test_confirm_resize_migration_not_found(self):
body = dict(confirmResize=None)
def confirm_resize_mock(*args):
raise exception.MigrationNotFoundByStatus(instance_id=1,
status='finished')
self.stubs.Set(nova.compute.api.API,
'confirm_resize',
confirm_resize_mock)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_confirm_resize,
req, FAKE_UUID, body)
def test_confirm_resize_raises_conflict_on_invalid_state(self):
body = dict(confirmResize=None)
def fake_confirm_resize(*args, **kwargs):
raise exception.InstanceInvalidState
self.stubs.Set(nova.compute.api.API, 'confirm_resize',
fake_confirm_resize)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_confirm_resize,
req, FAKE_UUID, body)
def test_revert_resize_migration_not_found(self):
body = dict(revertResize=None)
def revert_resize_mock(*args):
raise exception.MigrationNotFoundByStatus(instance_id=1,
status='finished')
self.stubs.Set(nova.compute.api.API,
'revert_resize',
revert_resize_mock)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_revert_resize,
req, FAKE_UUID, body)
def test_revert_resize_server(self):
body = dict(revertResize=None)
self.revert_resize_called = False
def revert_mock(*args):
self.revert_resize_called = True
self.stubs.Set(nova.compute.api.API, 'revert_resize', revert_mock)
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_revert_resize(req, FAKE_UUID, body)
self.assertEqual(self.revert_resize_called, True)
def test_revert_resize_raises_conflict_on_invalid_state(self):
body = dict(revertResize=None)
def fake_revert_resize(*args, **kwargs):
raise exception.InstanceInvalidState
self.stubs.Set(nova.compute.api.API, 'revert_resize',
fake_revert_resize)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_revert_resize,
req, FAKE_UUID, body)
def test_create_image(self):
body = {
'createImage': {
'name': 'Snapshot 1',
},
}
req = fakes.HTTPRequest.blank(self.url)
response = self.controller._action_create_image(req, FAKE_UUID, body)
location = response.headers['Location']
self.assertEqual('http://localhost/v2/fake/images/123', location)
def test_create_image_snapshots_disabled(self):
"""Don't permit a snapshot if the allow_instance_snapshots flag is
False
"""
self.flags(allow_instance_snapshots=False)
body = {
'createImage': {
'name': 'Snapshot 1',
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_create_image,
req, FAKE_UUID, body)
def test_create_image_with_metadata(self):
body = {
'createImage': {
'name': 'Snapshot 1',
'metadata': {'key': 'asdf'},
},
}
req = fakes.HTTPRequest.blank(self.url)
response = self.controller._action_create_image(req, FAKE_UUID, body)
location = response.headers['Location']
self.assertEqual('http://localhost/v2/fake/images/123', location)
def test_create_image_with_too_much_metadata(self):
body = {
'createImage': {
'name': 'Snapshot 1',
'metadata': {},
},
}
for num in range(FLAGS.quota_metadata_items + 1):
body['createImage']['metadata']['foo%i' % num] = "bar"
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller._action_create_image,
req, FAKE_UUID, body)
def test_create_image_no_name(self):
body = {
'createImage': {},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_create_image,
req, FAKE_UUID, body)
def test_create_image_blank_name(self):
body = {
'createImage': {
'name': '',
}
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_create_image,
req, FAKE_UUID, body)
def test_create_image_bad_metadata(self):
body = {
'createImage': {
'name': 'geoff',
'metadata': 'henry',
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_create_image,
req, FAKE_UUID, body)
def test_create_image_raises_conflict_on_invalid_state(self):
def snapshot(*args, **kwargs):
raise exception.InstanceInvalidState
self.stubs.Set(nova.compute.API, 'snapshot', snapshot)
body = {
"createImage": {
"name": "test_snapshot",
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_create_image,
req, FAKE_UUID, body)
class TestServerActionXMLDeserializer(test.TestCase):
    """Tests for servers.ActionDeserializer.

    Verifies that each action's XML request form deserializes to the same
    dict structure the JSON API uses, and that requests missing required
    attributes raise.
    """
    def setUp(self):
        super(TestServerActionXMLDeserializer, self).setUp()
        self.deserializer = servers.ActionDeserializer()
    def test_create_image(self):
        serial_request = """
<createImage xmlns="http://docs.openstack.org/compute/api/v1.1"
             name="new-server-test"/>"""
        request = self.deserializer.deserialize(serial_request, 'action')
        expected = {
            "createImage": {
                "name": "new-server-test",
            },
        }
        self.assertEquals(request['body'], expected)
    def test_create_image_with_metadata(self):
        serial_request = """
<createImage xmlns="http://docs.openstack.org/compute/api/v1.1"
             name="new-server-test">
    <metadata>
        <meta key="key1">value1</meta>
    </metadata>
</createImage>"""
        request = self.deserializer.deserialize(serial_request, 'action')
        expected = {
            "createImage": {
                "name": "new-server-test",
                "metadata": {"key1": "value1"},
            },
        }
        self.assertEquals(request['body'], expected)
    def test_change_pass(self):
        serial_request = """<?xml version="1.0" encoding="UTF-8"?>
                <changePassword
                    xmlns="http://docs.openstack.org/compute/api/v1.1"
                    adminPass="1234pass"/> """
        request = self.deserializer.deserialize(serial_request, 'action')
        expected = {
            "changePassword": {
                "adminPass": "1234pass",
            },
        }
        self.assertEquals(request['body'], expected)
    def test_change_pass_no_pass(self):
        # adminPass attribute is mandatory for changePassword.
        serial_request = """<?xml version="1.0" encoding="UTF-8"?>
                <changePassword
                    xmlns="http://docs.openstack.org/compute/api/v1.1"/> """
        self.assertRaises(AttributeError,
                          self.deserializer.deserialize,
                          serial_request,
                          'action')
    def test_reboot(self):
        serial_request = """<?xml version="1.0" encoding="UTF-8"?>
                <reboot
                    xmlns="http://docs.openstack.org/compute/api/v1.1"
                    type="HARD"/>"""
        request = self.deserializer.deserialize(serial_request, 'action')
        expected = {
            "reboot": {
                "type": "HARD",
            },
        }
        self.assertEquals(request['body'], expected)
    def test_reboot_no_type(self):
        # type attribute is mandatory for reboot.
        serial_request = """<?xml version="1.0" encoding="UTF-8"?>
                <reboot
                    xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
        self.assertRaises(AttributeError,
                          self.deserializer.deserialize,
                          serial_request,
                          'action')
    def test_resize(self):
        serial_request = """<?xml version="1.0" encoding="UTF-8"?>
                <resize
                    xmlns="http://docs.openstack.org/compute/api/v1.1"
                    flavorRef="http://localhost/flavors/3"/>"""
        request = self.deserializer.deserialize(serial_request, 'action')
        expected = {
            "resize": {"flavorRef": "http://localhost/flavors/3"},
        }
        self.assertEquals(request['body'], expected)
    def test_resize_no_flavor_ref(self):
        # flavorRef attribute is mandatory for resize.
        serial_request = """<?xml version="1.0" encoding="UTF-8"?>
                <resize
                    xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
        self.assertRaises(AttributeError,
                          self.deserializer.deserialize,
                          serial_request,
                          'action')
    def test_confirm_resize(self):
        serial_request = """<?xml version="1.0" encoding="UTF-8"?>
                <confirmResize
                    xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
        request = self.deserializer.deserialize(serial_request, 'action')
        expected = {
            "confirmResize": None,
        }
        self.assertEquals(request['body'], expected)
    def test_revert_resize(self):
        serial_request = """<?xml version="1.0" encoding="UTF-8"?>
                <revertResize
                    xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
        request = self.deserializer.deserialize(serial_request, 'action')
        expected = {
            "revertResize": None,
        }
        self.assertEquals(request['body'], expected)
    def test_rebuild(self):
        serial_request = """<?xml version="1.0" encoding="UTF-8"?>
                <rebuild
                    xmlns="http://docs.openstack.org/compute/api/v1.1"
                    name="new-server-test"
                    imageRef="http://localhost/images/1">
                    <metadata>
                        <meta key="My Server Name">Apache1</meta>
                    </metadata>
                    <personality>
                        <file path="/etc/banner.txt">Mg==</file>
                    </personality>
                </rebuild>"""
        request = self.deserializer.deserialize(serial_request, 'action')
        expected = {
            "rebuild": {
                "name": "new-server-test",
                "imageRef": "http://localhost/images/1",
                "metadata": {
                    "My Server Name": "Apache1",
                },
                "personality": [
                    {"path": "/etc/banner.txt", "contents": "Mg=="},
                ],
            },
        }
        self.assertDictMatch(request['body'], expected)
    def test_rebuild_minimum(self):
        serial_request = """<?xml version="1.0" encoding="UTF-8"?>
                <rebuild
                    xmlns="http://docs.openstack.org/compute/api/v1.1"
                    imageRef="http://localhost/images/1"/>"""
        request = self.deserializer.deserialize(serial_request, 'action')
        expected = {
            "rebuild": {
                "imageRef": "http://localhost/images/1",
            },
        }
        self.assertDictMatch(request['body'], expected)
    def test_rebuild_no_imageRef(self):
        # imageRef attribute is mandatory for rebuild.
        serial_request = """<?xml version="1.0" encoding="UTF-8"?>
                <rebuild
                    xmlns="http://docs.openstack.org/compute/api/v1.1"
                    name="new-server-test">
                    <metadata>
                        <meta key="My Server Name">Apache1</meta>
                    </metadata>
                    <personality>
                        <file path="/etc/banner.txt">Mg==</file>
                    </personality>
                </rebuild>"""
        self.assertRaises(AttributeError,
                          self.deserializer.deserialize,
                          serial_request,
                          'action')
| |
# Copyright 2019 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for perfkitbenchmarker.providers.gcp.gce_network."""
import contextlib
import unittest
from absl import flags
from absl.testing import flagsaver
import mock
from perfkitbenchmarker import benchmark_spec
from perfkitbenchmarker import configs
from perfkitbenchmarker import errors
from perfkitbenchmarker import linux_benchmarks
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.configs import benchmark_config_spec
from perfkitbenchmarker.providers.gcp import gce_network
from tests import pkb_common_test_case
from six.moves import builtins
FLAGS = flags.FLAGS
_PROJECT = 'project'
_CLOUD = 'GCP'
_BENCHMARK_NAME = 'iperf'
_URI = 'uri45678'
_COMPONENT = 'test_component'
_CFG_DEFAULT_DEFAULT = """
iperf:
vm_groups:
vm_1:
cloud: GCP
vm_spec:
GCP:
zone: us-west1-b
machine_type: n1-standard-4
vm_2:
cloud: GCP
vm_spec:
GCP:
zone: us-central1-c
machine_type: n1-standard-4
"""
_CFG_MULTI_MULTI = """
iperf:
vm_groups:
vm_1:
cloud: GCP
cidr: 10.0.1.0/24
vm_spec:
GCP:
zone: us-west1-b
machine_type: n1-standard-4
vm_2:
cloud: GCP
cidr: 192.168.1.0/24
vm_spec:
GCP:
zone: us-central1-c
machine_type: n1-standard-4
"""
_CFG_DEFAULT_MULTI = """
iperf:
vm_groups:
vm_1:
cloud: GCP
vm_spec:
GCP:
zone: us-west1-b
machine_type: n1-standard-4
vm_2:
cloud: GCP
cidr: 192.168.1.0/24
vm_spec:
GCP:
zone: us-central1-c
machine_type: n1-standard-4
"""
_CFG_SAME_ZONE_AND_CIDR = """
iperf:
vm_groups:
vm_1:
cloud: GCP
cidr: 10.0.1.0/24
vm_spec:
GCP:
zone: us-west1-b
machine_type: n1-standard-4
vm_2:
cloud: GCP
cidr: 10.0.1.0/24
vm_spec:
GCP:
zone: us-west1-b
machine_type: n1-standard-4
"""
_CFG_SAME_ZONE_DIFF_CIDR = """
iperf:
vm_groups:
vm_1:
cloud: GCP
cidr: 10.0.1.0/24
vm_spec:
GCP:
zone: us-west1-b
machine_type: n1-standard-4
vm_2:
cloud: GCP
cidr: 10.0.2.0/24
vm_spec:
GCP:
zone: us-west1-b
machine_type: n1-standard-4
"""
_REGEX_GCE_NET_NAMES = r'(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)'
_REGEX_GCE_FW_NAMES = r'(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)'
class BaseGceNetworkTest(pkb_common_test_case.PkbCommonTestCase):
  """Shared helpers for building a BenchmarkSpec in the GCE network tests."""

  def _CreateBenchmarkSpecFromYaml(self, yaml_string,
                                   benchmark_name=_BENCHMARK_NAME):
    """Parse a YAML benchmark config string and build a BenchmarkSpec."""
    parsed_config = configs.LoadConfig(yaml_string, {}, benchmark_name)
    return self._CreateBenchmarkSpecFromConfigDict(parsed_config,
                                                   benchmark_name)

  def _CreateBenchmarkSpecFromConfigDict(self, config_dict, benchmark_name):
    """Build a BenchmarkSpec for benchmark_name from a parsed config dict."""
    spec = benchmark_config_spec.BenchmarkConfigSpec(
        benchmark_name, flag_values=FLAGS, **config_dict)
    # Look up the benchmark module whose name matches; next() raises
    # StopIteration if the benchmark is unknown, just like the original.
    candidates = (b for b in linux_benchmarks.BENCHMARKS
                  if b.BENCHMARK_NAME == benchmark_name)
    return benchmark_spec.BenchmarkSpec(next(candidates), spec, _URI)
class TestGceNetworkConfig(BaseGceNetworkTest):
  """Tests network/subnet layout produced for each cidr configuration."""
  def testLoadDefaultConfig(self):
    # No cidrs anywhere: one shared network covering 10.0.0.0/8.
    spec = self._CreateBenchmarkSpecFromYaml(_CFG_DEFAULT_DEFAULT)
    with PatchCriticalObjects([('', '', 0)]):
      spec.ConstructVirtualMachines()
    self.assertDictContainsSubset({'cidr': None}, spec.custom_subnets['vm_1'])
    self.assertDictContainsSubset({'cidr': None}, spec.custom_subnets['vm_2'])
    self.assertLen(spec.networks, 1)
    for k in spec.networks.keys():
      self.assertItemsEqual(['10.0.0.0/8'], spec.networks[k].all_nets)
  def testLoadDefaultConfigWithFlags(self):
    # gce_subnet_* flags override the default 10.0.0.0/8 range.
    FLAGS.gce_subnet_region = 'us-north1-b'
    FLAGS.gce_subnet_addr = '1.2.3.4/33'
    spec = self._CreateBenchmarkSpecFromYaml(_CFG_DEFAULT_DEFAULT)
    with PatchCriticalObjects([('', '', 0)]):
      spec.ConstructVirtualMachines()
    self.assertDictContainsSubset({'cidr': None}, spec.custom_subnets['vm_1'])
    self.assertDictContainsSubset({'cidr': None}, spec.custom_subnets['vm_2'])
    self.assertLen(spec.networks, 1)
    for k in spec.networks.keys():
      self.assertItemsEqual(['1.2.3.4/33'], spec.networks[k].all_nets)
  def testLoadCustomConfig(self):
    # Distinct cidrs per group: two networks, each aware of both ranges.
    spec = self._CreateBenchmarkSpecFromYaml(_CFG_MULTI_MULTI)
    with PatchCriticalObjects([('', '', 0)]):
      spec.ConstructVirtualMachines()
    self.assertDictContainsSubset({'cidr': '10.0.1.0/24'},
                                  spec.custom_subnets['vm_1'])
    self.assertDictContainsSubset({'cidr': '192.168.1.0/24'},
                                  spec.custom_subnets['vm_2'])
    self.assertLen(spec.networks, 2)
    for k in spec.networks.keys():
      self.assertItemsEqual(['192.168.1.0/24', '10.0.1.0/24'],
                            spec.networks[k].all_nets)
  def testLoadCustomConfigWithFlags(self):
    # Explicit cidrs win over the gce_subnet_* flags.
    FLAGS.gce_subnet_region = 'us-north1-b'
    FLAGS.gce_subnet_addr = '1.2.3.4/33'
    spec = self._CreateBenchmarkSpecFromYaml(_CFG_MULTI_MULTI)
    with PatchCriticalObjects([('', '', 0)]):
      spec.ConstructVirtualMachines()
    self.assertDictContainsSubset({'cidr': '10.0.1.0/24'},
                                  spec.custom_subnets['vm_1'])
    self.assertDictContainsSubset({'cidr': '192.168.1.0/24'},
                                  spec.custom_subnets['vm_2'])
    self.assertLen(spec.networks, 2)
    for k in spec.networks.keys():
      self.assertItemsEqual(['192.168.1.0/24', '10.0.1.0/24'],
                            spec.networks[k].all_nets)
  def testLoadMixedConfig(self):
    # One default group plus one custom cidr group.
    spec = self._CreateBenchmarkSpecFromYaml(_CFG_DEFAULT_MULTI)
    with PatchCriticalObjects([('', '', 0)]):
      spec.ConstructVirtualMachines()
    self.assertDictContainsSubset({'cidr': None}, spec.custom_subnets['vm_1'])
    self.assertDictContainsSubset({'cidr': '192.168.1.0/24'},
                                  spec.custom_subnets['vm_2'])
    self.assertLen(spec.networks, 2)
    for k in spec.networks.keys():
      self.assertItemsEqual(['10.0.0.0/8', '192.168.1.0/24'],
                            spec.networks[k].all_nets)
  def testLoadMixedConfigWithFlags(self):
    FLAGS.gce_subnet_region = 'us-north1-b'
    FLAGS.gce_subnet_addr = '1.2.3.4/33'
    spec = self._CreateBenchmarkSpecFromYaml(_CFG_DEFAULT_MULTI)
    with PatchCriticalObjects([('', '', 0)]):
      spec.ConstructVirtualMachines()
    self.assertDictContainsSubset({'cidr': None}, spec.custom_subnets['vm_1'])
    self.assertDictContainsSubset({'cidr': '192.168.1.0/24'},
                                  spec.custom_subnets['vm_2'])
    self.assertLen(spec.networks, 2)
    for k in spec.networks.keys():
      self.assertItemsEqual(['1.2.3.4/33', '192.168.1.0/24'],
                            spec.networks[k].all_nets)
  def testLoadSameZoneCidrConfig(self):
    # Identical zone and cidr: the two groups share a single network.
    spec = self._CreateBenchmarkSpecFromYaml(_CFG_SAME_ZONE_AND_CIDR)
    with PatchCriticalObjects([('', '', 0)]):
      spec.ConstructVirtualMachines()
    self.assertDictContainsSubset({'cidr': '10.0.1.0/24'},
                                  spec.custom_subnets['vm_1'])
    self.assertDictContainsSubset({'cidr': '10.0.1.0/24'},
                                  spec.custom_subnets['vm_2'])
    self.assertLen(spec.networks, 1)
    for k in spec.networks.keys():
      self.assertItemsEqual(['10.0.1.0/24'], spec.networks[k].all_nets)
  def testLoadSameZoneDiffCidrConfig(self):
    # Same zone but different cidrs still produce two networks.
    spec = self._CreateBenchmarkSpecFromYaml(_CFG_SAME_ZONE_DIFF_CIDR)
    with PatchCriticalObjects([('', '', 0)]):
      spec.ConstructVirtualMachines()
    self.assertDictContainsSubset({'cidr': '10.0.1.0/24'},
                                  spec.custom_subnets['vm_1'])
    self.assertDictContainsSubset({'cidr': '10.0.2.0/24'},
                                  spec.custom_subnets['vm_2'])
    self.assertLen(spec.networks, 2)
    for k in spec.networks.keys():
      self.assertItemsEqual(['10.0.1.0/24', '10.0.2.0/24'],
                            spec.networks[k].all_nets)
class TestGceNetworkNames(BaseGceNetworkTest):
  """Tests the network and firewall-rule name builders on GceNetwork."""
  def setUp(self):
    super(TestGceNetworkNames, self).setUp()
    # need a benchmarkspec in the context to run
    FLAGS.run_uri = _URI
    config_spec = benchmark_config_spec.BenchmarkConfigSpec(
        'cluster_boot', flag_values=FLAGS)
    benchmark_spec.BenchmarkSpec(mock.Mock(), config_spec, 'uid')
  ########
  # Network Names
  ########
  def testGetDefaultNetworkName(self):
    # No cidr and no subnet flags -> 'default' network type, which is
    # elided from the generated name.
    project = _PROJECT
    zone = 'us-north1-b'
    cidr = None
    # long_cidr = '123.567.901/13'  # @TODO net_utils for address sanity checks
    vm = mock.Mock(zone=zone, project=project, cidr=cidr)
    net = gce_network.GceNetwork.GetNetwork(vm)
    net_name = net._MakeGceNetworkName()
    net_type = 'default'
    cidr_string = None
    uri = _URI
    expected_netname = '-'.join(
        i for i in ('pkb-network', net_type, cidr_string, uri) if
        i and i not in 'default')
    self.assertEqual(expected_netname,
                     net_name)  # pkb-network-uri45678 (default)
    self.assertRegexpMatches(net_name, _REGEX_GCE_NET_NAMES)
  def testGetSingleNetworkName(self):
    # gce_subnet_* flags set but no per-vm cidr -> 'single' network type;
    # the flag cidr is embedded in the name with dashes.
    FLAGS.gce_subnet_region = 'us-south1-c'
    FLAGS.gce_subnet_addr = '2.2.3.4/33'
    project = _PROJECT
    zone = 'us-north1-b'
    cidr = None
    vm = mock.Mock(zone=zone, project=project, cidr=cidr)
    net = gce_network.GceNetwork.GetNetwork(vm)
    net_name = net._MakeGceNetworkName()
    net_type = 'single'
    cidr_string = '2-2-3-4-33'
    uri = _URI
    expected_netname = '-'.join(
        i for i in ('pkb-network', net_type, cidr_string, uri) if
        i and i not in 'default')
    self.assertEqual(
        expected_netname,
        net_name)  # pkb-network-single-2-2-3-4-33-uri45678 (single)
    self.assertRegexpMatches(net_name, _REGEX_GCE_NET_NAMES)
  def testGetMultiNetworkName(self):
    # Per-vm cidr set -> 'multi' network type with the vm cidr in the name.
    project = _PROJECT
    zone = 'us-north1-b'
    cidr = '1.2.3.4/56'
    vm = mock.Mock(zone=zone, project=project, cidr=cidr)
    net = gce_network.GceNetwork.GetNetwork(vm)
    net_name = net._MakeGceNetworkName()
    net_type = 'multi'
    cidr_string = '1-2-3-4-56'
    uri = _URI
    expected_netname = '-'.join(
        i for i in ('pkb-network', net_type, cidr_string, uri) if
        i and i not in 'default')
    self.assertEqual(expected_netname,
                     net_name)  # pkb-network-multi-1-2-3-4-56-uri45678 (multi)
    self.assertRegexpMatches(net_name, _REGEX_GCE_NET_NAMES)
  @flagsaver.flagsaver(
      gce_network_name='my-network', gce_subnet_name='my-subnet')
  def testSpecifyNetworkName(self):
    # Explicit name flags bypass name generation entirely.
    vm = mock.Mock(zone='us-north1-b', project=_PROJECT, cidr='1.2.3.4/56')
    net = gce_network.GceNetwork.GetNetwork(vm)
    self.assertEqual('my-network', net.network_resource.name)
    self.assertEqual('my-subnet', net.subnet_resource.name)
  ########
  # FireWall Names
  ########
  def testGetDefaultFWName(self):
    # Default firewall rule name: internal source, 10.0.0.0/8 destination.
    project = _PROJECT
    zone = 'us-north1-b'
    cidr = None
    vm = mock.Mock(zone=zone, project=project, cidr=cidr)
    net = gce_network.GceNetwork.GetNetwork(vm)
    fw_name = net._MakeGceFWRuleName()
    net_type = 'default'
    src_cidr_string = 'internal'
    dst_cidr_string = '10-0-0-0-8'
    src_port = None
    dst_port = None
    uri = _URI
    expected_name = '-'.join(
        i for i in (
            net_type, src_cidr_string, dst_cidr_string, src_port,
            dst_port, uri) if i)
    self.assertEqual(expected_name, fw_name)
    self.assertRegexpMatches(fw_name, _REGEX_GCE_FW_NAMES)
  def testGetSingleFWName(self):
    # Subnet flags set -> 'single' type; destination is the flag cidr.
    FLAGS.gce_subnet_region = 'us-south1-c'
    FLAGS.gce_subnet_addr = '2.2.3.4/33'
    project = _PROJECT
    zone = 'us-north1-b'
    cidr = None
    lo_port = None
    hi_port = None
    vm = mock.Mock(zone=zone, project=project, cidr=cidr)
    net = gce_network.GceNetwork.GetNetwork(vm)
    fw_name = net._MakeGceFWRuleName(
        net_type=None, src_cidr=None, dst_cidr=None, port_range_lo=lo_port,
        port_range_hi=hi_port, uri=None)
    net_type = 'single'
    src_cidr_string = 'internal'
    dst_cidr_string = '2-2-3-4-33'
    src_port = None
    dst_port = None
    uri = _URI
    expected_name = '-'.join(
        i for i in (net_type, src_cidr_string, dst_cidr_string, src_port,
                    dst_port, uri)
        if i)
    self.assertEqual(expected_name,
                     fw_name)  # single-internal-2-2-3-4-33-uri45678
    self.assertRegexpMatches(fw_name, _REGEX_GCE_FW_NAMES)
  def testGetMultiFWNameWithPorts(self):
    # Port range included in the name; no dst_cidr means internal source
    # and the vm cidr as destination, with no prefix.
    project = _PROJECT
    zone = 'us-north1-b'
    cidr = '1.2.3.4/56'
    # cidr = None
    vm = mock.Mock(zone=zone, project=project, cidr=cidr)
    net = gce_network.GceNetwork.GetNetwork(vm)
    dst_cidr = None
    lo_port = 49152
    hi_port = 65535
    fw_name = net._MakeGceFWRuleName(
        net_type=None, src_cidr=None, dst_cidr=dst_cidr,
        port_range_lo=lo_port,
        port_range_hi=hi_port, uri=None)
    prefix = None
    net_type = 'multi'
    src_cidr_string = 'internal'
    dst_cidr_string = '1-2-3-4-56'
    src_port = '49152'
    dst_port = '65535'
    uri = _URI
    expected_name = '-'.join(
        i for i in (prefix, net_type, src_cidr_string, dst_cidr_string,
                    src_port, dst_port, uri) if i)
    self.assertEqual(expected_name,
                     fw_name)  # multi-internal-1-2-3-4-56-49152-65535-uri45678
    self.assertRegexpMatches(fw_name, _REGEX_GCE_FW_NAMES)
  def testGetMultiFWNameWithPortsDst(self):
    # Explicit dst_cidr -> 'perfkit-firewall' prefix and the vm cidr
    # becomes the source component of the name.
    project = _PROJECT
    zone = 'us-north1-b'
    cidr = '1.2.3.4/56'
    vm = mock.Mock(zone=zone, project=project, cidr=cidr)
    net = gce_network.GceNetwork.GetNetwork(vm)
    dst_cidr = '123.567.9.1/13'
    lo_port = 49152
    hi_port = 65535
    fw_name = net._MakeGceFWRuleName(
        net_type=None, src_cidr=None, dst_cidr=dst_cidr,
        port_range_lo=lo_port,
        port_range_hi=hi_port, uri=None)
    prefix = 'perfkit-firewall'
    net_type = 'multi'
    src_cidr_string = '1-2-3-4-56'
    dst_cidr_string = '123-567-9-1-13'
    src_port = '49152'
    dst_port = '65535'
    uri = _URI
    expected_name = '-'.join(
        i for i in (prefix, net_type, src_cidr_string, dst_cidr_string,
                    src_port, dst_port, uri) if i)
    # perfkit-firewall-multi-1-2-3-4-56-123-567-901-13-49152-65535-uri45678
    self.assertEqual(expected_name, fw_name)
    self.assertRegexpMatches(fw_name, _REGEX_GCE_FW_NAMES)
# found in tests/gce_virtual_machine_test.py
@contextlib.contextmanager
def PatchCriticalObjects(retvals=None):
  """A context manager that patches a few critical objects with mocks."""

  def _FakeIssueCommand(*_args, **_kwargs):
    # Default to an empty, successful command result; otherwise replay the
    # caller-supplied return values in order.
    if retvals is None:
      return ('', '', 0)
    return retvals.pop(0)

  patched_issue = mock.patch(vm_util.__name__ + '.IssueCommand',
                             side_effect=_FakeIssueCommand)
  patched_open = mock.patch(builtins.__name__ + '.open')
  patched_tempfile = mock.patch(vm_util.__name__ + '.NamedTemporaryFile')
  with patched_issue as issue_command, patched_open, patched_tempfile:
    yield issue_command
class TestGceNetwork(BaseGceNetworkTest):
  """Tests for default GCE network creation and the --mtu flag."""

  def setUp(self):
    super(TestGceNetwork, self).setUp()
    # GetNetwork() requires a benchmark spec in the thread context.
    config_spec = benchmark_config_spec.BenchmarkConfigSpec(
        'cluster_boot', flag_values=FLAGS)
    benchmark_spec.BenchmarkSpec(mock.Mock(), config_spec, 'uid')

  @mock.patch.object(vm_util, 'IssueCommand', return_value=('', '', 0))
  def testGetNetwork(self, mock_issue):
    vm = mock.Mock(zone='us-east1-a', project='myproject', cidr=None,
                   mtu=None)
    net = gce_network.GceNetwork.GetNetwork(vm)
    net.Create()
    self.assertEqual('myproject', net.project)
    self.assertEqual('us-east1-a', net.zone)
    self.assertIsNone(net.mtu)
    create_cmd = ' '.join(mock_issue.call_args_list[0][0][0])
    self.assertRegex(create_cmd, 'compute networks create ')
    # --mtu does NOT appear in the compute network create command
    self.assertNotIn('--mtu', create_cmd)

  @mock.patch.object(vm_util, 'IssueCommand', return_value=('', '', 0))
  def testMtuSupport(self, mock_issue):
    vm = mock.Mock(project='abc', cidr=None, mtu=1500)
    net = gce_network.GceNetwork.GetNetwork(vm)
    net.Create()
    self.assertEqual(1500, net.mtu)
    create_cmd = ' '.join(mock_issue.call_args_list[0][0][0])
    self.assertRegex(create_cmd, 'compute networks create .*--mtu 1500')
class GceFirewallRuleTest(pkb_common_test_case.PkbCommonTestCase):
  """Retry and error handling behavior of GceFirewallRule._Create()."""

  @mock.patch('time.sleep', side_effect=lambda _: None)
  def testGceFirewallRuleSuccessfulAfterRateLimited(self, mock_cmd):
    # First attempt is rate limited; the retry succeeds.
    fake_rets = [('stdout', 'Rate Limit Exceeded', 1),
                 ('stdout', 'some warning perhaps', 0)]
    with PatchCriticalObjects(fake_rets) as issue_command:
      rule = gce_network.GceFirewallRule('name', 'project', 'allow',
                                         'network_name')
      rule._Create()
      self.assertEqual(2, issue_command.call_count)

  @mock.patch('time.sleep', side_effect=lambda _: None)
  def testGceFirewallRuleGenericErrorAfterRateLimited(self, mock_cmd):
    # Rate limits are retried, but a generic error still raises.
    fake_rets = [('stdout', 'Rate Limit Exceeded', 1),
                 ('stdout', 'Rate Limit Exceeded', 1),
                 ('stdout', 'some random firewall error', 1)]
    with PatchCriticalObjects(fake_rets) as issue_command:
      with self.assertRaises(errors.VmUtil.IssueCommandError):
        rule = gce_network.GceFirewallRule('name', 'project', 'allow',
                                          'network_name')
        rule._Create()
      self.assertEqual(3, issue_command.call_count)

  @mock.patch('time.sleep', side_effect=lambda _: None)
  def testGceFirewallRuleAlreadyExistsAfterRateLimited(self, mock_cmd):
    # "already exists" after retries is treated as success.
    fake_rets = [('stdout', 'Rate Limit Exceeded', 1),
                 ('stdout', 'Rate Limit Exceeded', 1),
                 ('stdout', 'firewall already exists', 1)]
    with PatchCriticalObjects(fake_rets) as issue_command:
      rule = gce_network.GceFirewallRule('name', 'project', 'allow',
                                        'network_name')
      rule._Create()
      self.assertEqual(3, issue_command.call_count)

  @mock.patch('time.sleep', side_effect=lambda _: None)
  def testGceFirewallRuleGenericError(self, mock_cmd):
    # A non-retryable error on the first attempt raises immediately.
    fake_rets = [('stdout', 'some random firewall error', 1)]
    with PatchCriticalObjects(fake_rets) as issue_command:
      with self.assertRaises(errors.VmUtil.IssueCommandError):
        rule = gce_network.GceFirewallRule('name', 'project', 'allow',
                                          'network_name')
        rule._Create()
      self.assertEqual(1, issue_command.call_count)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| |
from fontParts.base.base import BaseDict, dynamicProperty, reference
from fontParts.base import normalizers
from fontParts.base.deprecated import DeprecatedGroups, RemovedGroups
class BaseGroups(BaseDict, DeprecatedGroups, RemovedGroups):
    """
    A Groups object. This object normally created as part of a
    :class:`BaseFont`. An orphan Groups object can be created like this::

        >>> groups = RGroups()

    This object behaves like a Python dictionary. Most of the dictionary
    functionality comes from :class:`BaseDict`, look at that object for the
    required environment implementation details.

    Groups uses :func:`normalizers.normalizeGroupKey` to normalize the key of
    the ``dict``, and :func:`normalizers.normalizeGroupValue` to normalize the
    value of the ``dict``.
    """

    keyNormalizer = normalizers.normalizeGroupKey
    valueNormalizer = normalizers.normalizeGroupValue

    def _reprContents(self):
        contents = []
        if self.font is not None:
            contents.append("for font")
            contents += self.font._reprContents()
        return contents

    # -------
    # Parents
    # -------

    # Font

    _font = None

    font = dynamicProperty("font", "The Groups' parent :class:`BaseFont`.")

    def _get_font(self):
        if self._font is None:
            return None
        return self._font()

    def _set_font(self, font):
        if self._font is not None and self._font != font:
            raise AssertionError("font for groups already set and is not same as font")
        if font is not None:
            font = reference(font)
        self._font = font

    # ---------
    # Searching
    # ---------

    def findGlyph(self, glyphName):
        """
        Returns a ``list`` of the group or groups associated with
        **glyphName**.
        **glyphName** will be an :ref:`type-string`. If no group is found
        to contain **glyphName** an empty ``list`` will be returned. ::

            >>> font.groups.findGlyph("A")
            ["A_accented"]
        """
        glyphName = normalizers.normalizeGlyphName(glyphName)
        groupNames = self._findGlyph(glyphName)
        # keyNormalizer is a plain function stored on the class; accessing it
        # through self would bind it, so unwrap with __func__.
        groupNames = [self.keyNormalizer.__func__(
            groupName) for groupName in groupNames]
        return groupNames

    def _findGlyph(self, glyphName):
        """
        This is the environment implementation of
        :meth:`BaseGroups.findGlyph`. **glyphName** will be
        an :ref:`type-string`.

        Subclasses may override this method.
        """
        found = []
        for key, groupList in self.items():
            if glyphName in groupList:
                found.append(key)
        return found

    # --------------
    # Kerning Groups
    # --------------

    side1KerningGroups = dynamicProperty(
        "base_side1KerningGroups",
        """
        All groups marked as potential side 1
        kerning members.

            >>> side1Groups = groups.side1KerningGroups

        The value will be a :ref:`dict` with
        :ref:`string` keys representing group names
        and :ref:`tuple` containing glyph names.
        """
    )

    def _get_base_side1KerningGroups(self):
        kerningGroups = self._get_side1KerningGroups()
        normalized = {}
        for name, members in kerningGroups.items():
            name = normalizers.normalizeGroupKey(name)
            members = normalizers.normalizeGroupValue(members)
            normalized[name] = members
        return normalized

    def _get_side1KerningGroups(self):
        """
        Subclasses may override this method.
        """
        found = {}
        for name, contents in self.items():
            if name.startswith("public.kern1."):
                found[name] = contents
        return found

    side2KerningGroups = dynamicProperty(
        "base_side2KerningGroups",
        """
        All groups marked as potential side 2
        kerning members.

            >>> side2Groups = groups.side2KerningGroups

        The value will be a :ref:`dict` with
        :ref:`string` keys representing group names
        and :ref:`tuple` containing glyph names.
        """
    )

    def _get_base_side2KerningGroups(self):
        kerningGroups = self._get_side2KerningGroups()
        normalized = {}
        for name, members in kerningGroups.items():
            name = normalizers.normalizeGroupKey(name)
            members = normalizers.normalizeGroupValue(members)
            normalized[name] = members
        return normalized

    def _get_side2KerningGroups(self):
        """
        Subclasses may override this method.
        """
        found = {}
        for name, contents in self.items():
            if name.startswith("public.kern2."):
                found[name] = contents
        return found

    # ---------------------
    # RoboFab Compatibility
    # ---------------------

    def remove(self, groupName):
        """
        Removes a group from the Groups. **groupName** will be
        a :ref:`type-string` that is the group name to
        be removed.

        This is a backwards compatibility method.
        """
        del self[groupName]

    def asDict(self):
        """
        Return the Groups as a ``dict``.

        This is a backwards compatibility method.
        """
        d = {}
        for k, v in self.items():
            d[k] = v
        return d

    # -------------------
    # Inherited Functions
    # -------------------

    def __contains__(self, groupName):
        """
        Tests to see if a group name is in the Groups.
        **groupName** will be a :ref:`type-string`.
        This returns a ``bool`` indicating if the **groupName**
        is in the Groups. ::

            >>> "myGroup" in font.groups
            True
        """
        return super(BaseGroups, self).__contains__(groupName)

    def __delitem__(self, groupName):
        """
        Removes **groupName** from the Groups. **groupName** is a
        :ref:`type-string`.::

            >>> del font.groups["myGroup"]
        """
        super(BaseGroups, self).__delitem__(groupName)

    def __getitem__(self, groupName):
        """
        Returns the contents of the named group. **groupName** is a
        :ref:`type-string`. The returned value will be a
        :ref:`type-immutable-list` of the group contents.::

            >>> font.groups["myGroup"]
            ("A", "B", "C")

        It is important to understand that any changes to the returned group
        contents will not be reflected in the Groups object. If one wants to
        make a change to the group contents, one should do the following::

            >>> group = font.groups["myGroup"]
            >>> group.remove("A")
            >>> font.groups["myGroup"] = group
        """
        return super(BaseGroups, self).__getitem__(groupName)

    def __iter__(self):
        """
        Iterates through the Groups, giving the key for each iteration. The
        order that the Groups will iterate though is not fixed nor is it
        ordered.::

            >>> for groupName in font.groups:
            >>>     print(groupName)
            "myGroup"
            "myGroup3"
            "myGroup2"
        """
        return super(BaseGroups, self).__iter__()

    def __len__(self):
        """
        Returns the number of groups in Groups as an ``int``.::

            >>> len(font.groups)
            5
        """
        return super(BaseGroups, self).__len__()

    def __setitem__(self, groupName, glyphNames):
        """
        Sets the **groupName** to the list of **glyphNames**. **groupName**
        is the group name as a :ref:`type-string` and **glyphNames** is a
        ``list`` of glyph names as :ref:`type-string`.

            >>> font.groups["myGroup"] = ["A", "B", "C"]
        """
        super(BaseGroups, self).__setitem__(groupName, glyphNames)

    def clear(self):
        """
        Removes all group information from Groups,
        resetting the Groups to an empty dictionary. ::

            >>> font.groups.clear()
        """
        super(BaseGroups, self).clear()

    def get(self, groupName, default=None):
        """
        Returns the contents of the named group.
        **groupName** is a :ref:`type-string`, and the returned values will
        either be :ref:`type-immutable-list` of group contents or ``None``
        if no group was found. ::

            >>> font.groups["myGroup"]
            ("A", "B", "C")

        It is important to understand that any changes to the returned group
        contents will not be reflected in the Groups object. If one wants to
        make a change to the group contents, one should do the following::

            >>> group = font.groups["myGroup"]
            >>> group.remove("A")
            >>> font.groups["myGroup"] = group
        """
        return super(BaseGroups, self).get(groupName, default)

    def items(self):
        """
        Returns a list of ``tuple`` of each group name and group members.
        Group names are :ref:`type-string` and group members are a
        :ref:`type-immutable-list` of :ref:`type-string`. The initial
        list will be unordered.

            >>> font.groups.items()
            [("myGroup", ("A", "B", "C"), ("myGroup2", ("D", "E", "F"))]
        """
        return super(BaseGroups, self).items()

    def keys(self):
        """
        Returns a ``list`` of all the group names in Groups. This list will be
        unordered.::

            >>> font.groups.keys()
            ["myGroup4", "myGroup1", "myGroup5"]
        """
        return super(BaseGroups, self).keys()

    def pop(self, groupName, default=None):
        """
        Removes the **groupName** from the Groups and returns the list of
        group members. If no group is found, **default** is returned.
        **groupName** is a :ref:`type-string`. This must return either
        **default** or a :ref:`type-immutable-list` of glyph names as
        :ref:`type-string`.

            >>> font.groups.pop("myGroup")
            ("A", "B", "C")
        """
        return super(BaseGroups, self).pop(groupName, default)

    def update(self, otherGroups):
        """
        Updates the Groups based on **otherGroups**. **otherGroups** is a
        ``dict`` of groups information. If a group from **otherGroups** is in
        Groups, the group members will be replaced by the group members from
        **otherGroups**. If a group from **otherGroups** is not in the Groups,
        it is added to the Groups. If Groups contain a group name that is not
        in **otherGroups**, it is not changed.

            >>> font.groups.update(newGroups)
        """
        super(BaseGroups, self).update(otherGroups)

    def values(self):
        """
        Returns a ``list`` of each named group's members.
        This will be a list of lists, the group members will be a
        :ref:`type-immutable-list` of :ref:`type-string`. The initial
        list will be unordered.

            >>> font.groups.items()
            [("A", "B", "C"), ("D", "E", "F")]
        """
        return super(BaseGroups, self).values()
| |
# Copyright 2012 One Laptop per Child
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import logging
#from gettext import gettext as _
import gettext
import math
import gtk
import gobject
import gconf
from sugar.activity import activity
from sugar.graphics.toolbarbox import ToolbarBox
from sugar.activity.widgets import ActivityToolbarButton
from sugar.activity.widgets import StopButton
from sugar.graphics import style
from sugar.graphics.toolbutton import ToolButton
from sugar.graphics.icon import Icon
# Seconds each slideshow image is displayed before auto-advancing.
DEFAULT_CHANGE_IMAGE_TIME = 1.5
# powerd convention: while a file named after our pid exists in this
# directory, the machine will not auto-suspend.
POWERD_INHIBIT_DIR = '/var/run/powerd-inhibit-suspend'
class WelcomeActivity(activity.Activity):
    """WelcomeActivity class as specified in activity.info"""

    def __init__(self, handle):
        activity.Activity.__init__(self, handle)

        # No collaboration features: keep the share option insensitive.
        self.max_participants = 1

        # Build the (redesigned) toolbar.
        toolbox = ToolbarBox()
        toolbox.toolbar.insert(ActivityToolbarButton(self), 0)
        toolbox.toolbar.insert(gtk.SeparatorToolItem(), -1)

        self.image_viewer = ImageCollectionViewer(False)

        # Previous / next animation buttons, wired to the viewer callbacks.
        for icon_name, callback in (
                ('go-previous-paired', self.image_viewer.prev_anim_clicked_cb),
                ('go-next-paired', self.image_viewer.next_anim_clicked_cb)):
            button = ToolButton(icon_name)
            button.connect("clicked", callback, None)
            toolbox.toolbar.insert(button, -1)

        # Expanding spacer pushes the stop button to the right edge.
        spacer = gtk.SeparatorToolItem()
        spacer.props.draw = False
        spacer.set_expand(True)
        toolbox.toolbar.insert(spacer, -1)
        toolbox.toolbar.insert(StopButton(self), -1)

        self.set_toolbar_box(toolbox)
        toolbox.show_all()

        self.modify_bg(gtk.STATE_NORMAL, style.COLOR_WHITE.get_gdk_color())
        self.set_canvas(self.image_viewer)

    def can_close(self):
        """Re-allow suspend before the activity is closed."""
        self.image_viewer.finish()
        return True
class CustomButton(gtk.EventBox):
    """A clickable image loaded from the activity's icons directory.

    `icon` is the svg base name and `size` the pixel size of the rendered
    pixbuf.
    """

    def __init__(self, icon, size):
        # BUG FIX: the original called super(gtk.EventBox, self).__init__(),
        # which resolves the MRO *after* gtk.EventBox and therefore skips
        # EventBox's own initializer.  super() must receive the defining
        # class, not its parent.
        super(CustomButton, self).__init__()
        image = gtk.Image()
        path = os.path.expanduser('~/Activities/Welcome.activity/icons/')
        pxb = gtk.gdk.pixbuf_new_from_file_at_size('%s/%s.svg' % (path, icon),
                                                   size, size)
        image.set_from_pixbuf(pxb)
        self.add(image)
        self.modify_bg(gtk.STATE_NORMAL, style.COLOR_WHITE.get_gdk_color())
class ImageCollectionViewer(gtk.VBox):
    """Slideshow over the animations found in the activity's images/ dir.

    Every subdirectory of images/ is an animation whose files are shown in
    sorted order; a bare image file is a one-frame animation.  While the
    viewer is alive it inhibits powerd suspend via a pid flag file.
    """

    __gtype_name__ = 'WelcomeDialog'

    def __init__(self, start_window=True):
        # BUG FIX: super() must receive the defining class, not the parent;
        # super(gtk.VBox, self) skipped gtk.VBox.__init__ in the MRO.
        super(ImageCollectionViewer, self).__init__()
        self.using_powerd = self._verify_powerd_directory()
        self._inhibit_suspend()
        self.image = gtk.Image()
        self.pack_start(self.image, True, True, padding=0)
        # images management
        images_path = \
            os.path.expanduser('~/Activities/Welcome.activity/images/')
        self.anim_order = 0
        self.image_order = 0
        self.auto_change_anim = True
        self.animation_list = []
        for fname in sorted(os.listdir(images_path)):
            if os.path.isdir(images_path + fname):
                anim_path = images_path + fname
                logging.debug('Animation dir file: %s', anim_path)
                animation_images_list = []
                for imagefname in sorted(os.listdir(anim_path)):
                    image_path = os.path.join(anim_path, imagefname)
                    animation_images_list.append(image_path)
                    logging.debug('Image file: %s', image_path)
                self.animation_list.append(animation_images_list)
            else:
                self.animation_list.append([images_path + fname])
        if self.animation_list:
            self._update_image()
        if start_window:
            # Create bottom controls
            bottom_toolbar = gtk.HBox()
            self.pack_start(bottom_toolbar, False, padding=style.zoom(30))
            left_box = gtk.HBox()
            bottom_toolbar.pack_start(left_box, False, padding=0)
            center_align = gtk.Alignment(0.5, 0, 0, 0)
            center_box = gtk.HBox()
            center_align.add(center_box)
            bottom_toolbar.pack_start(center_align, True, True, padding=0)
            right_box = gtk.HBox()
            bottom_toolbar.pack_start(right_box, False, padding=0)
            # init gettext
            locale_path = \
                os.path.expanduser('~/Activities/Welcome.activity/locale/')
            gettext.bindtextdomain('org.laptop.WelcomeActivity', locale_path)
            gettext.textdomain('org.laptop.WelcomeActivity')
            _ = gettext.gettext
            _next_button = gtk.Button()
            _next_button.set_label(_('Next'))
            next_image = Icon(icon_name='go-right')
            _next_button.set_image(next_image)
            _next_button.connect('clicked', self.__next_clicked_cb)
            self.sequence_view = SequenceView(len(self.animation_list))
            right_box.pack_end(_next_button, False, False,
                               padding=style.zoom(30))
            bt_width, bt_height = _next_button.size_request()
            prev_bt = CustomButton('go-previous-paired-grey', bt_height)
            center_box.pack_start(prev_bt, False, False, 5)
            prev_bt.connect('button-press-event', self.prev_anim_clicked_cb)
            center_box.pack_start(self.sequence_view, False, False, padding=5)
            next_bt = CustomButton('go-next-paired-grey', bt_height)
            center_box.pack_start(next_bt, False, False, 5)
            next_bt.connect('button-press-event', self.next_anim_clicked_cb)
            # make right_box and left_box the same size
            width = int(gtk.gdk.screen_width() / 4)
            right_box.set_size_request(width, -1)
            left_box.set_size_request(width, -1)
        else:
            center_align = gtk.Alignment(0.5, 0, 0, 0)
            self.pack_start(center_align, False, padding=2)
            self.sequence_view = SequenceView(len(self.animation_list))
            center_align.add(self.sequence_view)
        self.show_all()
        # calculate space available for images (only to tell the designers)
        height_av = gtk.gdk.screen_height() - style.GRID_CELL_SIZE * 2
        width_av = gtk.gdk.screen_width()
        print('Size available for image: %d x %d' % (width_av, height_av))

    def __next_clicked_cb(self, button):
        # 'Next' on the startup screen: re-allow suspend and leave gtk.main().
        self._allow_suspend()
        gtk.main_quit()

    def auto_change_image(self):
        """Timer callback: advance one frame (or animation).

        Returns False so the pending gobject timeout never repeats; a fresh
        timeout is armed by _update_image().
        """
        if self.image_order < len(self.animation_list[self.anim_order]) - 1:
            self.image_order += 1
            self._update_image()
        else:
            if self.auto_change_anim:
                self.next_anim_clicked_cb(None, None)
            else:
                self.image_order = 0
                self._update_image()
        return False

    def next_anim_clicked_cb(self, button, event):
        """Jump to the first frame of the next animation (wraps around)."""
        if button is not None:
            # Manual navigation: cancel the timer and stop auto-advancing
            # across animations.
            gobject.source_remove(self.timer_id)
            self.auto_change_anim = False
        self.image_order = 0
        self.anim_order += 1
        if self.anim_order == len(self.animation_list):
            self.anim_order = 0
        if self.sequence_view is not None:
            self.sequence_view.set_value(self.anim_order + 1)
        self.get_root_window().set_cursor(gtk.gdk.Cursor(gtk.gdk.WATCH))
        gobject.idle_add(self._update_image)

    def prev_anim_clicked_cb(self, button, event):
        """Jump to the first frame of the previous animation (wraps around)."""
        if button is not None:
            gobject.source_remove(self.timer_id)
            self.auto_change_anim = False
        self.image_order = 0
        self.anim_order -= 1
        if self.anim_order < 0:
            self.anim_order = len(self.animation_list) - 1
        if self.sequence_view is not None:
            self.sequence_view.set_value(self.anim_order + 1)
        self.get_root_window().set_cursor(gtk.gdk.Cursor(gtk.gdk.WATCH))
        gobject.idle_add(self._update_image)

    def _update_image(self):
        """Show the current frame and arm the timer for the next change."""
        image_file_name = \
            self.animation_list[self.anim_order][self.image_order]
        self.image.set_from_file(image_file_name)
        self.get_root_window().set_cursor(gtk.gdk.Cursor(gtk.gdk.LEFT_PTR))
        time = int(DEFAULT_CHANGE_IMAGE_TIME * 1000)
        if self.image_order == 0:
            # The first frame of an animation gets double display time.
            time = time * 2
        self.timer_id = gobject.timeout_add(time, self.auto_change_image)

    def finish(self):
        """Release the powerd suspend inhibition."""
        self._allow_suspend()

    def _verify_powerd_directory(self):
        using_powerd = os.access(POWERD_INHIBIT_DIR, os.W_OK)
        logging.debug("using_powerd: %d", using_powerd)
        return using_powerd

    def _inhibit_suspend(self):
        """Create our pid-named flag file so powerd keeps the machine awake."""
        if self.using_powerd:
            flag_file_name = self._powerd_flag_name()
            try:
                # BUG FIX: 'except OSError, IOError' bound an OSError to the
                # name IOError instead of catching both; and file() left the
                # handle open.
                with open(flag_file_name, 'w') as flag_file:
                    flag_file.write('')
                logging.debug("inhibit_suspend file is %s", flag_file_name)
            except (OSError, IOError):
                pass

    def _allow_suspend(self):
        """Remove the pid-named flag file, letting powerd suspend again."""
        if self.using_powerd:
            flag_file_name = self._powerd_flag_name()
            try:
                os.unlink(flag_file_name)
                logging.debug("allow_suspend unlinking %s", flag_file_name)
            except (OSError, IOError):
                pass

    def _powerd_flag_name(self):
        return POWERD_INHIBIT_DIR + "/%u" % os.getpid()
class SequenceView(gtk.DrawingArea):
    """A horizontal row of `cant` dots; the first `value` dots are lit."""

    def __init__(self, cant, point_size=10):
        # BUG FIX: super() must receive the defining class, not the parent;
        # super(gtk.DrawingArea, self) skipped DrawingArea.__init__.
        super(SequenceView, self).__init__()
        self._cant = cant
        self._value = 1
        self._point_size = point_size
        # Fallback size until the first size_allocate arrives, so draw()
        # cannot fail with AttributeError on an early expose event.
        self._width = self._point_size * self._cant * 2
        self._height = self._point_size
        # Plain init trace; the original logged this at ERROR level.
        logging.debug('init SequenceView cant= %d', self._cant)
        self.set_size_request(self._point_size * self._cant * 2,
                              self._point_size)
        self.connect('expose_event', self.__expose_cb)

        def size_allocate_cb(widget, allocation):
            # Record the real size once, then stop listening.
            self._width, self._height = allocation.width, allocation.height
            self.disconnect(self._setup_handle)

        self._setup_handle = self.connect('size_allocate',
                                          size_allocate_cb)

    def __expose_cb(self, widget, event):
        ctx = widget.window.cairo_create()
        # set a clip region for the expose event
        ctx.rectangle(event.area.x, event.area.y, event.area.width,
                      event.area.height)
        ctx.clip()
        self.draw(ctx)

    def set_value(self, value):
        """Set the 1-based current step and schedule a redraw."""
        self._value = value
        self.queue_draw()

    def draw(self, ctx):
        """Paint the dot row on `ctx` (white background, gold/grey dots)."""
        if self._cant == 0:
            return
        ctx.save()
        radio = self._point_size / 2.0
        ctx.translate(0, 0)
        ctx.rectangle(0, 0, self._width, self._height)
        ctx.set_source_rgb(1.0, 1.0, 1.0)
        ctx.fill()
        ctx.translate(radio, self._height / 2 - radio)
        for n in range(self._cant):
            if n < self._value:
                ctx.set_source_rgb(0.913, 0.733, 0.0)  # eebb00
            else:
                ctx.set_source_rgb(0.33, 0.33, 0.33)  # grey
            ctx.arc(radio, radio, radio, 0., 2 * math.pi)
            ctx.fill()
            ctx.translate(self._point_size * 2, 0)
        ctx.restore()
def set_fonts():
    """Apply the Sugar default font face/size to plain GTK widgets."""
    client = gconf.client_get_default()
    default_face = client.get_string('/desktop/sugar/font/default_face')
    default_size = client.get_float('/desktop/sugar/font/default_size')
    gtk.settings_get_default().set_property(
        "gtk-font-name", "%s %f" % (default_face, default_size))
def main():
    """Run the image viewer standalone in a fullscreen, undecorated window."""
    set_fonts()
    window = gtk.Window()
    window.add(ImageCollectionViewer())
    window.set_size_request(gtk.gdk.screen_width(), gtk.gdk.screen_height())
    window.set_position(gtk.WIN_POS_CENTER_ALWAYS)
    window.set_decorated(False)
    window.modify_bg(gtk.STATE_NORMAL, style.COLOR_WHITE.get_gdk_color())
    window.show_all()
    window.connect("destroy", gtk.main_quit)
    gtk.main()
# Standalone entry point (outside the Sugar shell).
if __name__ == "__main__":
    main()
| |
from __future__ import with_statement
import os, re, glob
#def to_strong(s):
# s = re.sub(r'\{\{\*', '<strong>', s)
# s = re.sub(r'\*\}\}', '</strong>', s)
# return s
#
#def to_unstrong(str):
# s = re.sub(r'\{\{\*', '', s)
# s = re.sub(r'\*\}\}', '', s)
# return s
#
#def pre_to_console(s):
# s = re.sub('<pre class="literal-block">\n\\$', '<pre class="console">\n$', s)
# return s
#
#def pre_to_file(s):
# s = re.sub('<pre class="literal-block">\n', '<pre class="file">\n', s)
# return s
# kwaser tag file used when converting *.txt into HTML.
tagfile = prop('tagfile', 'html-css')
#dir = 'data'
# Test suite lives one directory up; retrieved example data goes under it.
testdir = '../test'
datadir = testdir + '/data'
#title = "Tenjin User's Guide"
#stylesheet = 'html4css1.css'
stylesheet = 'docstyle.css'
#rstdir = '/Library/Frameworks/Python.framework/Versions/2.4/bin'
#rst2html = rstdir+'/rst2html.py'
#rst2html_opts = 'rst2html_opts', '--link-stylesheet --no-xml-declaration --no-source-link --no-toc-backlinks --language=en --stylesheet="%s" --title="%s"' % (stylesheet, title) #--strip-comments
# Options passed to HTML tidy when cleaning generated markup.
tidy_opts = 'tidy_opts', '-q -i -wrap 9999 --hide-comments yes'
#users_guide_eruby = 'users-guide.eruby'
# Source *.eruby documents are shared from the common doc directory.
original_docdir = '../../common/doc/'
users_guide_eruby = original_docdir + 'users-guide.eruby'
faq_eruby = original_docdir + 'faq.eruby'
examples_eruby = original_docdir + 'examples.eruby'
# Product built when kook is invoked without an explicit target.
kook_default_product = 'all'
testfiles = ['test_users_guide.py', 'test_faq.py']
# Each basename yields a .txt (from eruby) and a .html (from txt).
basenames = ['users-guide', 'faq', 'examples']
textfiles = [ x+'.txt' for x in basenames]
htmlfiles = [ x+'.html' for x in basenames]
@ingreds('doc', 'test')
def task_all(c):
    """Default task: build the HTML docs and run the tests."""
    pass
@ingreds(htmlfiles, stylesheet)
def task_doc(c):
    """generate *.html (all documents) and copy the stylesheet"""
    pass
@product(stylesheet)
@ingreds(original_docdir + stylesheet)
def file_css(c):
    """Copy the shared stylesheet from the common doc directory."""
    cp(c.ingred, c.product)
#@product('users-guide.html')
#@ingreds('users-guide.txt', 'retrieve')
#@byprods('users-guide.toc.html')
#def file_users_guide_html(c):
# system(c%'kwaser -t $(tagfile) -T $(ingred) > $(byprod)')
# system(c%'kwaser -t $(tagfile) $(ingred) > $(product)')
# rm(c.byproducts)
#@product('users-guide.html')
#@ingreds('users-guide.rst')
#@byprods('users-guide.tmp')
#def file_users_guide_html(c):
# system_f(c%'$(rst2html) $(rst2html_opts) $(ingreds) 2>&1 > $(byprod)')
# s = open(c.byprod).read()
# s = to_string(s)
# s = pre_to_console(s)
# s = pre_to_file(s)
# open(c.byprod, 'w').write(s)
# system_f(c%'tidy $(tidy_opts) $(byprod) > $(product)')
# rm(c.byprod)
@product('*.html')
@ingreds('$(1).txt')
@byprods('$(1).toc.html')
def file_html(c):
    """Convert $(1).txt into $(1).html with kwaser, then tidy the output."""
    # First kwaser pass emits the table of contents, second the full page.
    system(c%'kwaser -t $(tagfile) -T $(ingred) > $(byprod)')
    system(c%'kwaser -t $(tagfile) $(ingred) > $(product)')
    # tidy rewrites the file in place; system_f() ignores its exit status.
    system_f(c%'tidy -i -w 9999 -utf8 -m -q $(product)')
    # Strip tidy's generator meta tag and force a UTF-8 content type.
    f = (
        (re.compile(r'^ <meta name="generator" content="HTML Tidy .*?\n', re.M), ''),
        (re.compile(r'^ <meta http-equiv="Content-Type" content="text/html">\n\n?', re.M),
         ' <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">\n'),
    )
    edit(c.product, by=f)
@product('*.txt')
@ingreds(original_docdir + '$(1).eruby')
def file_txt(c):
    """create *.txt from *.eruby and retrieve testdata from *.txt"""
    # Render the eruby source to plain text with erubis (python mode).
    system(c%"erubis -E PercentLine -c '@lang=%q|python|' -p '\\[% %\\]' $(ingred) > $(product)")
    # Retrieve the embedded example data into a per-document test data dir.
    name = re.sub(r'\.txt$', '', c.product.replace('-', '_'))
    datadir = testdir + '/data/' + name
    if os.path.exists(datadir):
        rm_rf(datadir + '/*')
    else:
        mkdir(datadir)
    system(c%"which retrieve")
    system(c%"retrieve -Fd $(datadir) $(product)")
    # Split each '*.result2' file into numbered '*N.result' files, one per
    # '$ command' session.  The regexp is loop-invariant: compile it once
    # (the original also left an unused 'pat' local and unclosed handles).
    rexp = re.compile(r'^\$ ', re.M)
    for filename in glob.glob(datadir + '/*.result2'):
        with open(filename) as fin:
            content = fin.read()
        os.unlink(filename)
        i = 0
        for cont in rexp.split(content):
            if not cont: continue
            i += 1
            fname = re.sub(r'\.result2$', '%s.result' % i, filename)
            with open(fname, 'w') as fout:
                fout.write('$ ' + cont)
    # The FAQ tests additionally need the example template module.
    if name == 'faq':
        cp('../misc/my_template.py', datadir)
@ingreds(testdir + '/test_users_guide.py',
         testdir + '/test_faq.py',
         testdir + '/test_examples.py')
def task_create_test(c):
    """create test script for each document (see task_test_py)"""
    pass
#@ingreds('users-guide.txt', 'faq.txt', 'examples.txt')
#def task_retrieve(c):
# pass
def task_clean(c):
    """remove generated intermediate files"""
    rm_rf('*.toc.html', 'test.log', '*.pyc')
@ingreds('test_users_guide', 'test_faq', 'test_examples')
def task_test(c):
    """run all generated document test suites"""
    pass
@ingreds(testdir+'/test_users_guide.py', 'users-guide.txt')
def task_test_users_guide(c):
    """Run the generated users-guide test script from inside the test dir.

    The generated scripts resolve their data directories relative to the
    test directory, so chdir there first.  (The original also computed an
    unused 'name' local, which has been removed.)
    """
    with chdir(testdir) as d:
        system(c%'python $(ingred)')
@ingreds(testdir+'/test_faq.py', 'faq.txt')
def task_test_faq(c):
    """run the FAQ tests (same runner as the users guide)"""
    task_test_users_guide(c)
@ingreds(testdir+'/test_examples.py', 'examples.txt')
def task_test_examples(c):
    """run the examples tests (same runner as the users guide)"""
    task_test_users_guide(c)
@product(testdir + '/test_users_guide.py')
@ingreds(testdir + '/test_users-guide.py')
def task_test_users_guide_py(c):
    """rename test_users-guide.py (hyphen) to an importable module name"""
    mv(c.ingred, c.product)
#mv("data/users-guide", "data/users_guide")
@product(testdir+'/test_*.py')
def task_test_py(c):
    """Generate a unittest script exercising one document's retrieved examples."""
    ## document base name, e.g. 'users-guide' from 'test_users-guide.py'
    m = re.search(r'test_(.*)\.py$', c.product)
    base = m.group(1)
    name = base.replace('-', '_')
    ## class name, e.g. 'users-guide' -> UsersGuideTest
    classname = ''.join([x.capitalize() for x in re.split(r'[-_]', base)]) + 'Test'
    ## header: test-class skeleton with a generic _test() runner
    buf = []
    buf.append(c%"""
#
# auto generated
#
import unittest, os, re
from testcase_helper import *

class $(classname)(unittest.TestCase, TestCaseHelper):

    basedir = '$(datadir)/$(name)'
    DIR = (os.path.dirname(__file__) or '.') + '/' + basedir
    CWD = os.getcwd()

    def setUp(self):
        os.chdir(self.__class__.DIR)

    def tearDown(self):
        os.chdir(self.__class__.CWD)

    def _test(self):
        filename = self.filename
        dirname = os.path.dirname(filename)
        pwd = os.getcwd()
        if dirname:
            os.chdir(dirname)
            filename = os.path.basename(filename)
        s = open(filename).read()
        pat = r'\\A\\$ (.*?)\\n'
        m = re.match(pat, s)
        command = m.group(1)
        expected = re.sub(pat, '', s)
        result = os.popen(command).read()
        self.assertTextEqual(expected, result)
""")
    ## body: runs at class-body level in the generated file and exec()s one
    ## test method per retrieved *.result / *.source file
    buf.append("""
    from glob import glob
    import os
    filenames = []
    filenames.extend(glob('%s/*.result' % basedir))
    filenames.extend(glob('%s/*/*.result' % basedir))
    filenames.extend(glob('%s/*.source' % basedir))
    filenames.extend(glob('%s/*/*.source' % basedir))
    for filename in filenames:
        #name = os.path.basename(filename).replace('.result', '')
        name = filename.replace(basedir+'/', '')
        s = "\\n".join((
            "def test_%s(self):" % re.sub('[^\w]', '_', name),
            "    self.filename = '%s'" % name,
            "    self._test()",
        ))
        exec(s)
""")
    ## explicit test methods for *.result files present at generation time
    # NOTE(review): these set self.name, but _test() reads self.filename —
    # looks like legacy/dead code; confirm before relying on it.
    for path in glob.glob(datadir + '/*.result'):
        name = re.sub(r'\.result$', '', os.path.basename(path))
        buf.append(c%"""
    def test_$(name)(self):
        self.name = '$(name)'
        self._test()
""")
    ## footer
    buf.append(c%"""
remove_unmatched_test_methods($(classname))

if __name__ == '__main__':
    unittest.main()
""")
    # Join once and close the output file (the original joined the buffer
    # twice — the first join was assigned to an unused local — and never
    # closed the file handle).
    script = ''.join(buf)
    with open(c.product, 'w') as f:
        f.write(script)
    print(c%"** '$(product)' created.")
| |
import mysql.connector
import json
import os
import sys
import requests
import urllib2
from bs4 import BeautifulSoup
from time import sleep
def get_user_pass():
    """Read '<user> <password>' credentials from data/userpass.txt.

    Returns a (user, password) tuple taken from the last non-empty line of
    the file (matching the original last-line-wins behavior).  Returns
    (None, None) if the file is empty instead of raising NameError.

    SECURITY: the original printed the credentials (including the password)
    to stdout; that debug print has been removed.
    """
    user = pas = None
    with open('data/userpass.txt') as f:
        for line in f:
            fields = line.split(' ')
            user, pas = fields[0].strip(), fields[1].strip()
    return (user, pas)
class db_access(object):
    """Thin wrapper around a mysql.connector connection/cursor pair.

    Connects to `db_name` on localhost at construction time and exposes the
    raw `cnx`/`cursor` plus a small query helper.
    """

    def __init__(self, db_name, usr, pwd=None):
        # Fixed: 'print x' is a SyntaxError under Python 3; single-argument
        # print(...) behaves identically on Python 2 and 3.
        print('in db_access')
        self.db_name = db_name
        self.db_url = "localhost"
        self.connect(usr, pwd)
        self.cursor = self.cnx.cursor()

    def query(self, qry):
        """Execute `qry` and return all result rows as a list of lists."""
        self.cursor.execute(qry)
        return [list(r) for r in self.cursor]

    def connect(self, usr, pwd=None):
        """Open the MySQL connection; exit the process on failure."""
        try:
            self.cnx = mysql.connector.connect(user=usr, password=pwd,
                                               database=self.db_name,
                                               host=self.db_url)
        except mysql.connector.Error as err:
            print(err)
            sys.exit(1)

    def close(self):
        """Disconnect from DB (cursor first, then connection)."""
        self.cursor.close()
        self.cnx.close()
def cast_to_sql():
    """Insert cast lists from json/casts/*.json into the `casts` table.

    Skips (imdb_id, actor_id) pairs that are already present.  Errors for
    a single file are printed and that file is skipped.
    """
    mydb = db_access('rottentomatoes', os.environ['SQLUSER'], os.environ['SQLPASS'])
    files_list = os.listdir('json/casts')
    for f in files_list:
        # 'with' closes the handle even when json.load or the DB raises
        # (the original leaked it on the exception path).
        with open('json/casts/' + f, 'r') as fp:
            try:
                alldata = json.load(fp)
                imdb_id = alldata['imdb_id']
                for i in range(len(alldata['cast'])):
                    c = alldata['cast'][i]
                    print(imdb_id)
                    # NOTE(review): SQL is string-built from scraped data;
                    # parameterized queries would be safer here.
                    qry = "SELECT * FROM casts where imdb_id = " + "\"" + imdb_id + \
                          "\" and actor_id = " + "\"" + c['id'] + "\";"
                    mydb.cursor.execute(qry)
                    if [row for row in mydb.cursor]:
                        continue  # already recorded
                    params = ("\"" + c['id'] + "\"", "\"" + imdb_id + "\"",
                              "\"" + alldata['rt_ind'] + "\"",
                              "\"" + c['name'] + "\"",
                              "\"" + c['characters'][0] + "\"", i)
                    qry = "INSERT INTO casts "
                    qry += "(actor_id, imdb_id, rt_id, nam, charac, billed_num) "
                    qry += "VALUES (%s, %s, %s, %s, %s, %d);" % params
                    print(qry)
                    mydb.cursor.execute(qry)
                    mydb.cnx.commit()
            except Exception as e:
                print(e)
def download_casts():
    """Fetch the cast list from the RT API for every movie in rt_info and
    cache it as json/casts/cast<imdb_id>.json.

    Fixes over the original: the output file is only created after the API
    response is known to be good (the original opened it first, so an API
    error left behind an empty file that blocked future retries and leaked
    the handle), and any 'error' response now skips the movie instead of
    crashing on the missing 'cast' key.
    """
    mydb = db_access('rottentomatoes', os.environ['SQLUSER'], os.environ['SQLPASS'])
    qry = ("""SELECT imdb_id,rt_id,title,cast FROM rt_info""")
    mydb.cursor.execute(qry)
    casts = [m for m in mydb.cursor]
    for [imdb_id, rt_id, title, cast_link] in casts:
        print(title)
        out_path = 'json/casts/cast' + str(imdb_id) + '.json'
        if os.path.isfile(out_path):
            continue  # already cached
    # fetch the cast resource for this movie
        url = cast_link + '?apikey=8pfmg4jgr8z3zcrpubh4v8pg'
        r = requests.get(url)
        d = dict(r.json())
        if 'error' in d:
            # Covers 'Gateway Timeout' and any other API error; skip
            # without creating a cache file so the movie is retried later.
            if d['error'] != 'Gateway Timeout':
                print('error')
            continue
        data = {'imdb_id': imdb_id, 'rt_ind': rt_id, 'cast': d['cast']}
        with open(out_path, 'w') as f:
            f.write(json.dumps(data))
        sleep(.2)  # be polite to the API
def rt_to_sql():
    """Insert Rotten Tomatoes search results cached in json/movies/*.json
    into the rt_info table, skipping imdb_ids already present.
    """
    mydb = db_access('rottentomatoes', os.environ['SQLUSER'], os.environ['SQLPASS'])
    files_list = os.listdir('json/movies')
    for f in files_list:
        # file names look like 'moviepage<imdb_id>.json'
        imdb_id = f[9:f.index('.')]
        # BUG FIX: the original wrote `fp.close` without parentheses, which
        # never closed the file; `with` handles it properly.
        with open('json/movies/' + f, 'r') as fp:
            js = json.loads(fp.read())
        for m in js:
            params = ("\"" + imdb_id + "\"", "\"" + m['id'] + "\"", "\"" + m['title'] + "\"",
                      m['ratings']['critics_score'],
                      m['ratings']['audience_score'], "\"" + m['links']['cast'] + "\"",
                      "\"" + m['posters']['thumbnail'] + "\"", "\"" + m['posters']['original'] + "\"",)
            qry = "SELECT * FROM rt_info where imdb_id = " + "\"" + imdb_id + "\""
            mydb.cursor.execute(qry)
            # distinct name so the comprehension cannot clobber the loop
            # variable `m` (it did leak under Python 2 semantics)
            existing = [row for row in mydb.cursor]
            if existing:
                continue
            # NOTE(review): string-built SQL; parameterized queries would be safer.
            qry = "INSERT INTO rt_info "
            qry += "(imdb_id, rt_id, title, critic, aud, cast, thumb, orig) "
            qry += "VALUES (%s, %s, %s, %d, %d, %s, %s, %s);" % params
            print(qry)
            try:
                mydb.cursor.execute(qry)
                mydb.cnx.commit()
            except Exception as e:
                print(e)
def download_rt_info():
    """Query the Rotten Tomatoes search API for every movie in imdb_info
    and cache year-matched results as json/movies/moviepage<imdb_id>.json.
    """
    mydb = db_access('rottentomatoes', os.environ['SQLUSER'],
                     os.environ['SQLPASS'])
    qry = "SELECT * FROM imdb_info"
    mydb.cursor.execute(qry)
    movies = [m for m in mydb.cursor]
    for c in movies:
        # get movie info
        print(c)
        (imdb_id, title, yr, director, budget, gross) = c
        imdb_id = str(imdb_id)
        # check if we already have rt data
        if os.path.isfile('json/movies/moviepage' + imdb_id + '.json'):
            continue
        # query the rt API
        # NOTE(review): the title is interpolated unescaped; URL-quoting it
        # would be more robust for titles with '&' or '#'.
        url = 'http://api.rottentomatoes.com/api/public/v1.0/movies.json'
        url += '?apikey=8pfmg4jgr8z3zcrpubh4v8pg&q=' + str(title)
        url += '&page_limit=50'
        r = requests.get(url)
        js = r.json()
        # find the matching movie(s) by release year
        matches = []
        print(js)
        if js and js['total'] > 0:
            for m in js['movies']:
                if m['year'] == yr:
                    print('match')
                    matches.append(m)
        # save data locally if we have a match(es)
        if matches:
            with open('json/movies/moviepage' + imdb_id + '.json', 'w') as f:
                f.write(json.dumps(matches))
        sleep(.2)  # rate-limit API calls
def imdb_to_sql():
    """Load scraped rows from top_movies.txt into the imdb_info table.

    Lines are tab-separated: title, year, director, budget, gross, imdb id.
    '-' marks unknown budget/gross and is stored as -1.
    """
    mydb = db_access('rottentomatoes', os.environ['SQLUSER'], os.environ['SQLPASS'])
    with open('top_movies.txt', 'r') as fp:
        for line in fp:
            print(line)
            # 'movie_id' instead of 'id' to avoid shadowing the builtin
            [title, yr, director, budget, gross, movie_id] = line.split('\t')
            if gross == '-':
                gross = '-1'
            if budget == '-':
                budget = '-1'
            movie_id = movie_id.replace('\n', '')
            qry = "SELECT * FROM imdb_info where imdb_id = " + "'" + movie_id + "'"
            mydb.cursor.execute(qry)
            existing = [m for m in mydb.cursor]
            if existing:
                print('pass')
                continue
            # NOTE(review): string-built SQL; parameterized queries would be safer.
            qry = "INSERT INTO rottentomatoes.imdb_info "
            qry += "(imdb_id, title, year, director, budget, gross) "
            qry += "VALUES (%s, %s, %d, %s, %d, %d);" % \
                ("\"" + movie_id + "\"", "\"" + title + "\"", int(yr), "\"" +
                 director + "\"", int(budget), int(gross))
            print(qry)
            try:
                mydb.cursor.execute(qry)
                mydb.cnx.commit()
            except Exception as e:
                print(e)
def make_typeahead_list():
    """Dump every movie title to ../static/movielist.json for the
    front-end typeahead widget.
    """
    mydb = db_access('rottentomatoes', os.environ['SQLUSER'], os.environ['SQLPASS'])
    qry = "SELECT title FROM imdb_info;"
    mydb.cursor.execute(qry)
    # single comprehension; the original copied the list element-by-element
    # into a second list for no reason
    titles = [m[0] for m in mydb.cursor]
    with open('../static/movielist.json', 'w') as f:
        f.write(json.dumps(titles))
def make_poster_list():
    """Dump [title, poster-url] pairs to ../static/posterlist.json."""
    mydb = db_access('rottentomatoes', os.environ['SQLUSER'], os.environ['SQLPASS'])
    qry = "SELECT title,orig FROM rt_info;"
    mydb.cursor.execute(qry)
    rows = [m for m in mydb.cursor]
    posters = []
    for title, poster_url in rows:
        print(poster_url)
        # The original computed a local file name (basename of the URL) for
        # a commented-out 'static/posters/...' entry; that dead code is
        # removed — the app links directly to the remote poster URL.
        posters.append([title, poster_url])
    with open('../static/posterlist.json', 'w') as f:
        f.write(json.dumps(posters))
def writers_to_sql():
    """Insert scraped writer credits from json/writers/*.json into the
    `writers` table, skipping (imdb_id, writer_id) pairs already present.
    """
    mydb = db_access('rottentomatoes', os.environ['SQLUSER'], os.environ['SQLPASS'])
    files_list = os.listdir('json/writers')
    for f in files_list:
        # 'with' closes the file even when json.load or the DB raises
        with open('json/writers/' + f, 'r') as fp:
            try:
                alldata = json.load(fp)
                imdb_id = alldata['imdb_id']
                for writer_name, writer_id in alldata['writers']:
                    print(imdb_id)
                    # NOTE(review): string-built SQL from scraped data;
                    # parameterized queries would be safer.
                    qry = "SELECT * FROM writers where imdb_id = " + "\"" + imdb_id + \
                          "\" and writer_id = " + "\"" + writer_id + "\";"
                    if mydb.query(qry):
                        continue  # already recorded
                    params = ("\"" + imdb_id + "\"", "\"" + writer_id + "\"",
                              "\"" + writer_name + "\"")
                    qry = "INSERT INTO writers "
                    qry += "(imdb_id, writer_id, writer_name) "
                    qry += "VALUES (%s, %s, %s);" % params
                    print(qry)
                    mydb.cursor.execute(qry)
                    mydb.cnx.commit()
            except Exception as e:
                print(e)
def scrape_writers():
    """Scrape each movie's IMDB full-credits page for writer credits and
    cache them as json/writers/<imdb_id>.json.
    """
    mydb = db_access('rottentomatoes', os.environ['SQLUSER'], os.environ['SQLPASS'])
    qry = "SELECT imdb_id FROM imdb_info;"
    mydb.cursor.execute(qry)
    movieids = [m[0] for m in mydb.cursor]
    for m in movieids:
        print(m)
        url = 'http://www.imdb.com/title/%s/fullcredits' % m
        print(url)
        try:
            # get the HTML of the page
            req = urllib2.Request(url)
            f = urllib2.urlopen(req)
            html = f.read()
        except Exception:
            # was a bare `except:`; at least let KeyboardInterrupt through
            continue
        # NOTE(review): no explicit parser is passed; BeautifulSoup picks a
        # default, which may warn on newer bs4 versions.
        soup = BeautifulSoup(html)
        movie_info = {'imdb_id': m, 'writers': []}
        for anchor in soup.findAll('a'):
            href = anchor.get('href', '')  # guard: not every <a> has an href
            if 'ttfc_fc_wr' in href:
                writer_id = href[6:15]  # '/name/nmXXXXXXX/...' -> 'nmXXXXXXX'
                writer_name = anchor.contents[0].strip()
                if [writer_name, writer_id] not in movie_info['writers']:
                    movie_info['writers'].append([writer_name, writer_id])
        if movie_info['writers']:
            with open('json/writers/' + m + '.json', 'w') as f:
                f.write(json.dumps(movie_info))
#a function to scrape IMDB to get information for the top grossing movies
# of each year
def scrape_top_movies(vb=True):
    """Scrape IMDB's yearly box-office listings (1984-2014) and append one
    tab-separated line per movie (title, year, director, budget, gross,
    imdb id) to top_movies.txt.

    :param vb: when True, also echo each record to the console
    """
    #a file for output (append mode, so reruns accumulate)
    ff = open('top_movies.txt','a')
    #iterate over years
    for yr in range(1984,2015):
        #only 50 movies are shown at a time
        for st in range(1,52,50):
            print yr,st
            #first scrape the top 100 movie page
            url = 'http://www.imdb.com/search/title?at=0&sort=boxoffice_gross_us,'
            url += 'desc&start='+str(st)+'&year='+str(yr)+','+str(yr)
            #get the HTML of the page
            req = urllib2.Request(url)
            f = urllib2.urlopen(req)
            html = f.read()
            soup = BeautifulSoup(html)
            td = soup.findAll('td',{'class':'title'})
            buds = soup.findAll('td',{'class':'sort_col'})
            ls = []
            #iterate over the table entry elements
            for i in range(len(td)):
                t,b = td[i],buds[i]
                #get the director and budget
                c = t.findAll('span',{'class':'credit'})
                dirname = c[0].contents[1].contents[0]
                budget = b.contents[0]
                #find the link with the movie title and id
                a = t.findNext('a')
                ls.append((a.contents[0],dirname,budget,a['href']))
            #iterate through the list (last 50 entries = this results page)
            for l in ls[-50:]:
                #get information scraped from the last page
                try:
                    title = str(l[0])
                    director = str(l[1])
                except Exception,e:
                    # str() on non-ASCII titles raises under Python 2; skip
                    print e
                    continue
                # gross column looks like '$123.4M' or '$56.7K', or '-'
                gross = str(l[2]).replace('$','')
                # NOTE(review): when gross == '-' this reads '-'[-1] and
                # silently picks fac=1e6; an empty string would IndexError.
                if gross[-1] == 'K': fac = 1e3
                else: fac = 1e6
                if gross != '-':
                    gross = int(float(gross[:-1])*fac)
                budget = '-'
                # href looks like '/title/ttXXXXXXX/' -> 'ttXXXXXXX'
                id = l[3][7:-1]
                #go to the movie page, find the budget entry
                url = 'http://www.imdb.com/title/'+id
                req = urllib2.Request(url)
                f = urllib2.urlopen(req)
                html = f.read()
                soup = BeautifulSoup(html)
                txts= soup.findAll('div',{'class':'txt-block'})
                for t in txts:
                    a = t.findAll('h4',{'class':'inline'})
                    if a and a[0].contents[0] == 'Budget:':
                        # strip whitespace/commas and the leading '$'
                        budget = t.contents[2].replace('\n','').replace(',','') \
                                 .replace(' ','').replace('\t','')[1:]
                #catch ascii error from strange characters
                try:
                    #console logging
                    if vb:
                        print title+'\t'+str(yr)+'\t'+director+'\t'+budget+'\t'+ \
                              str(gross)+'\t'+id+'\n'
                    #write the information to a text file
                    ff.write(title+'\t'+str(yr)+'\t'+director+'\t'+budget+'\t'+ \
                             str(gross)+'\t'+id+'\n')
                except Exception,e:
                    print e
    ff.close()
def cast2_to_sql():
    """Insert scraped actor lists from json/actors2/*.json into the
    `actors` table, skipping (imdb_id, actor_id) pairs already present.
    """
    mydb = db_access('rottentomatoes', os.environ['SQLUSER'], os.environ['SQLPASS'])
    files_list = os.listdir('json/actors2')
    for f in files_list:
        # 'with' closes the file even when json.load or the DB raises
        with open('json/actors2/' + f, 'r') as fp:
            try:
                alldata = json.load(fp)
                imdb_id = alldata['imdb_id']
                print(imdb_id)
                print(alldata['actors'])
                # each entry is [actor_id, actor_name]; i is billing order
                for i, (idnum, name) in enumerate(alldata['actors']):
                    # NOTE(review): string-built SQL from scraped data;
                    # parameterized queries would be safer.
                    qry = "SELECT * FROM actors where imdb_id = " + "\"" + imdb_id + \
                          "\" and actor_id = " + "\"" + idnum + "\";"
                    mydb.cursor.execute(qry)
                    if [row for row in mydb.cursor]:
                        continue  # already recorded
                    params = ("\"" + imdb_id + "\"", "\"" + idnum + "\"",
                              "\"" + name + "\"", i)
                    qry = "INSERT INTO actors "
                    qry += "(imdb_id, actor_id, actor_name, billed_num) "
                    qry += "VALUES (%s, %s, %s, %d);" % params
                    print(qry)
                    mydb.cursor.execute(qry)
                    mydb.cnx.commit()
            except Exception as e:
                print(e)
def scrape_actors():
    """Scrape the top-10 billed actors for each movie from its IMDB
    full-credits page and cache them as json/actors2/<imdb_id>.json.
    """
    mydb = db_access('rottentomatoes', os.environ['SQLUSER'], os.environ['SQLPASS'])
    qry = "SELECT imdb_id FROM imdb_info;"
    mydb.cursor.execute(qry)
    movieids = [m[0] for m in mydb.cursor]
    for m in movieids:
        print(m)
        url = 'http://www.imdb.com/title/%s/fullcredits' % m
        print(url)
        try:
            # get the HTML of the page
            req = urllib2.Request(url)
            f = urllib2.urlopen(req)
            html = f.read()
        except Exception:
            # was a bare `except:`; at least let KeyboardInterrupt through
            continue
        soup = BeautifulSoup(html)
        items = soup.findAll(attrs={'class': 'itemprop'})
        actors = []
        # every other 'itemprop' node is the cell we want (1st, 3rd, ...)
        for idx, item in enumerate(items):
            if idx % 2 == 1:
                continue
            name = item.text.strip()
            idnum = item.findAll('a')[0]['href'][6:15]  # '/name/nmXXXXXXX/...'
            print(name, idnum)
            # BUG FIX: the original tested `name not in actors`, but
            # `actors` holds [id, name] pairs, so the dedupe never fired.
            if name not in (a[1] for a in actors):
                actors.append([idnum, name])
            if len(actors) >= 10:
                break
        print(actors)
        movie_info = {'imdb_id': m, 'actors': actors}
        if movie_info['actors']:
            with open('json/actors2/' + m + '.json', 'w') as f:
                f.write(json.dumps(movie_info))
| |
"""
Tools for converting PyPI packages to conda recipes.
"""
from __future__ import absolute_import, division, print_function
import requests
import keyword
import os
import re
import subprocess
import sys
from collections import defaultdict
from os import makedirs, listdir, getcwd, chdir
from os.path import join, isdir, exists, isfile
from tempfile import mkdtemp
from shutil import copy2
if sys.version_info < (3,):
from xmlrpclib import ServerProxy, Transport, ProtocolError
from urllib2 import build_opener, ProxyHandler, Request, HTTPError
else:
from xmlrpc.client import ServerProxy, Transport, ProtocolError
from urllib.request import build_opener, ProxyHandler, Request
from urllib.error import HTTPError
from conda.fetch import (download, handle_proxy_407)
from conda.connection import CondaSession
from conda.utils import human_bytes, hashsum_file
from conda.install import rm_rf
from conda.compat import input, configparser, StringIO, string_types, PY3
from conda.config import get_proxy_servers
from conda.cli.common import spec_from_line
from conda_build.utils import tar_xf, unzip
from conda_build.source import SRC_CACHE, apply_patch
from conda_build.build import create_env
from conda_build.config import config
# meta.yaml template; every {placeholder} is filled by main() via
# PYPI_META.format(**d).  BUG FIX: the source 'fn:' line did not reference
# {filename}, even though main() sets d['filename'] specifically for it.
PYPI_META = """\
package:
  name: {packagename}
  version: !!str {version}

source:
  fn: {filename}
  url: {pypiurl}
  {usemd5}md5: {md5}
#  patches:
   # List any patch files here
   # - fix.patch

{build_comment}build:
  {egg_comment}preserve_egg_dir: True
  {entry_comment}entry_points:
    # Put any entry points (scripts to be generated automatically) here. The
    # syntax is module:function.  For example
    #
    # - {packagename} = {packagename}:main
    #
    # Would create an entry point called {packagename} that calls {packagename}.main()
{entry_points}

  # If this is a new build for the same version, increment the build
  # number. If you do not include this key, it defaults to 0.
  # number: 1

requirements:
  build:
    - python{build_depends}

  run:
    - python{run_depends}

test:
  # Python imports
  {import_comment}imports:{import_tests}

  {entry_comment}commands:
    # You can put test commands to be run here.  Use this to test that the
    # entry points work.
{test_commands}

  # You can also put a file called run_test.py in the recipe that will be run
  # at test time.

  # requires:
    # Put any additional test requirements here.  For example
    # - nose

about:
  home: {homeurl}
  license: {license}
  summary: {summary}

# See
# http://docs.continuum.io/conda/build.html for
# more information about meta.yaml
"""
# Template for the recipe's build.sh (Unix build script); written verbatim
# by main() via str.format (no placeholders are substituted in practice).
PYPI_BUILD_SH = """\
#!/bin/bash
$PYTHON setup.py install
# Add more build steps here, if they are necessary.
# See
# http://docs.continuum.io/conda/build.html
# for a list of environment variables that are set during the build process.
"""
# Template for the recipe's bld.bat (Windows build script).
PYPI_BLD_BAT = """\
"%PYTHON%" setup.py install
if errorlevel 1 exit 1
:: Add more build steps here, if they are necessary.
:: See
:: http://docs.continuum.io/conda/build.html
:: for a list of environment variables that are set during the build process.
"""
# Note the {} formatting bits here
DISTUTILS_PATCH = '''\
diff core.py core.py
--- core.py
+++ core.py
@@ -166,5 +167,33 @@ def setup (**attrs):
\n
+# ====== BEGIN CONDA SKELETON PYPI PATCH ======
+
+import distutils.core
+import io
+import os.path
+import sys
+import yaml
+from yaml import Loader, SafeLoader
+
+# Override the default string handling function to always return unicode
+# objects (taken from StackOverflow)
+def construct_yaml_str(self, node):
+ return self.construct_scalar(node)
+Loader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)
+SafeLoader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)
+
+def setup(*args, **kwargs):
+ data = {{}}
+ data['install_requires'] = kwargs.get('install_requires', [])
+ data['extras_require'] = kwargs.get('extras_require', {{}})
+ data['entry_points'] = kwargs.get('entry_points', [])
+ data['packages'] = kwargs.get('packages', [])
+ data['setuptools'] = 'setuptools' in sys.modules
+ with io.open(os.path.join("{}", "pkginfo.yaml"), 'w', encoding='utf-8') as fn:
+ fn.write(yaml.dump(data, encoding=None))
+
+
+# ======= END CONDA SKELETON PYPI PATCH ======
\n
def run_setup (script_name, script_args=None, stop_after="run"):
"""Run a setup script in a somewhat controlled environment, and
'''
# https://gist.github.com/chrisguitarguy/2354951
class RequestsTransport(Transport):
    """
    Drop in Transport for xmlrpclib that uses Requests instead of httplib
    """
    # change our user agent to reflect Requests
    user_agent = "Python XMLRPC with Requests (python-requests.org)"
    # override this if you'd like to https
    use_https = True
    session = CondaSession()

    def request(self, host, handler, request_body, verbose):
        """
        Make an xmlrpc request, retrying once after proxy-auth (407)
        failures.  Raises ProtocolError for other request failures.
        """
        headers = {
            'User-Agent': self.user_agent,
            'Content-Type': 'text/xml',
        }
        url = self._build_url(host, handler)
        try:
            resp = self.session.post(url, data=request_body, headers=headers,
                                     proxies=self.session.proxies)
            resp.raise_for_status()
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 407:  # Proxy Authentication Required
                handle_proxy_407(url, self.session)
                # Try again
                return self.request(host, handler, request_body, verbose)
            else:
                raise
        except requests.exceptions.ConnectionError as e:
            # requests isn't so nice here. For whatever reason, https gives this
            # error and http gives the above error. Also, there is no status_code
            # attribute here. We have to just check if it looks like 407. See
            # https://github.com/kennethreitz/requests/issues/2061.
            if "407" in str(e):  # Proxy Authentication Required
                handle_proxy_407(url, self.session)
                # Try again
                return self.request(host, handler, request_body, verbose)
            else:
                raise
        except requests.RequestException as e:
            # BUG FIX: `resp` is unbound when post() itself raised, so the
            # original reference to resp.status_code here masked the real
            # error with a NameError.  Use the response attached to the
            # exception when one exists.
            err_resp = getattr(e, 'response', None)
            status = err_resp.status_code if err_resp is not None else -1
            hdrs = err_resp.headers if err_resp is not None else {}
            raise ProtocolError(url, status, str(e), hdrs)
        else:
            return self.parse_response(resp)

    def parse_response(self, resp):
        """
        Parse the xmlrpc response.
        """
        p, u = self.getparser()
        p.feed(resp.text)
        p.close()
        return u.close()

    def _build_url(self, host, handler):
        """
        Build a url for our request based on the host, handler and use_https
        property
        """
        scheme = 'https' if self.use_https else 'http'
        return '%s://%s/%s' % (scheme, host, handler)
def main(args, parser):
    """Generate a conda recipe skeleton (meta.yaml, build.sh, bld.bat) for
    each requested PyPI package.

    Queries PyPI's XML-RPC API for versions/urls/metadata, optionally
    downloads and runs each package's setup.py (with a patched distutils)
    to discover dependencies and entry points, then writes one recipe
    directory per package under args.output_dir.
    """
    if len(args.packages) > 1 and args.download:
        # Because if a package's setup.py imports setuptools, it will make all
        # future packages look like they depend on distribute. Also, who knows
        # what kind of monkeypatching the setup.pys out there could be doing.
        print("WARNING: building more than one recipe at once without "
              "--no-download is not recommended")
    proxies = get_proxy_servers()
    if proxies:
        transport = RequestsTransport()
    else:
        transport = None
    client = ServerProxy(args.pypi_url, transport=transport)
    package_dicts = {}
    [output_dir] = args.output_dir
    # list-item prefix used when joining multi-value YAML fields
    indent = '\n    - '
    all_packages = client.list_packages()
    all_packages_lower = [i.lower() for i in all_packages]
    # args.packages acts as a work queue; --recursive appends to it below
    while args.packages:
        package = args.packages.pop()
        # Look for package[extra,...] features spec:
        match_extras = re.match(r'^([^[]+)\[([^]]+)\]$', package)
        if match_extras:
            package, extras = match_extras.groups()
            extras = extras.split(',')
        else:
            extras = []
        dir_path = join(output_dir, package.lower())
        if exists(dir_path):
            raise RuntimeError("directory already exists: %s" % dir_path)
        # d holds every placeholder PYPI_META.format() needs; the
        # *_comment keys toggle whole template sections on/off
        d = package_dicts.setdefault(package, {'packagename': package.lower(),
                                               'run_depends': '',
                                               'build_depends': '',
                                               'entry_points': '',
                                               'build_comment': '# ',
                                               'test_commands': '',
                                               'usemd5': '',
                                               'entry_comment': '#',
                                               'egg_comment': '#'})
        d['import_tests'] = valid(package).lower()
        if d['import_tests'] == '':
            d['import_comment'] = '# '
        else:
            d['import_comment'] = ''
            d['import_tests'] = indent + d['import_tests']
        if args.version:
            [version] = args.version
            versions = client.package_releases(package, True)
            if version not in versions:
                sys.exit("Error: Version %s of %s is not available on PyPI."
                         % (version, package))
            d['version'] = version
        else:
            versions = client.package_releases(package)
            if not versions:
                # The xmlrpc interface is case sensitive, but the index itself
                # is apparently not (the last time I checked,
                # len(set(all_packages_lower)) == len(set(all_packages)))
                if package.lower() in all_packages_lower:
                    print("%s not found, trying %s" % (package, package.capitalize()))
                    args.packages.append(all_packages[all_packages_lower.index(package.lower())])
                    del package_dicts[package]
                    continue
                sys.exit("Error: Could not find any versions of package %s" %
                         package)
            if len(versions) > 1:
                print("Warning, the following versions were found for %s" %
                      package)
                for ver in versions:
                    print(ver)
                print("Using %s" % versions[0])
                print("Use --version to specify a different version.")
            d['version'] = versions[0]
        data = client.release_data(package, d['version'])
        urls = client.release_urls(package, d['version'])
        if not args.all_urls:
            # Try to find source urls
            urls = [url for url in urls if url['python_version'] == 'source']
        if not urls:
            if 'download_url' in data:
                urls = [defaultdict(str, {'url': data['download_url']})]
                urls[0]['filename'] = urls[0]['url'].split('/')[-1]
                # no md5 available for a bare download_url; comment it out
                d['usemd5'] = '#'
            else:
                sys.exit("Error: No source urls found for %s" % package)
        if len(urls) > 1 and not args.noprompt:
            print("More than one source version is available for %s:" %
                  package)
            for i, url in enumerate(urls):
                print("%d: %s (%s) %s" % (i, url['url'],
                                          human_bytes(url['size']),
                                          url['comment_text']))
            n = int(input("Which version should I use? "))
        else:
            n = 0
        print("Using url %s (%s) for %s." % (urls[n]['url'],
                                             human_bytes(urls[n]['size'] or 0),
                                             package))
        d['pypiurl'] = urls[n]['url']
        d['md5'] = urls[n]['md5_digest']
        d['filename'] = urls[n]['filename']
        d['homeurl'] = data['home_page']
        d['summary'] = repr(data['summary'])
        license_classifier = "License :: OSI Approved ::"
        if 'classifiers' in data:
            licenses = [classifier.split(license_classifier, 1)[1] for classifier in
                        data['classifiers'] if classifier.startswith(license_classifier)]
        else:
            licenses = []
        if not licenses:
            if data['license']:
                if args.noprompt:
                    license = data['license']
                elif '\n' not in data['license']:
                    print('Using "%s" for the license' % data['license'])
                    license = data['license']
                else:
                    # Some projects put the whole license text in this field
                    print("This is the license for %s" % package)
                    print()
                    print(data['license'])
                    print()
                    license = input("What license string should I use? ")
            else:
                if args.noprompt:
                    license = "UNKNOWN"
                else:
                    license = input(("No license could be found for %s on " +
                                     "PyPI. What license should I use? ") %
                                    package)
        else:
            license = ' or '.join(licenses)
        d['license'] = license
        # Unfortunately, two important pieces of metadata are only stored in
        # the package itself: the dependencies, and the entry points (if the
        # package uses distribute). Our strategy is to download the package
        # and "fake" distribute/setuptools's setup() function to get this
        # information from setup.py. If this sounds evil, keep in mind that
        # distribute itself already works by monkeypatching distutils.
        if args.download:
            import yaml
            print("Downloading %s (use --no-download to skip this step)" %
                  package)
            tempdir = mkdtemp('conda_skeleton_' + package)
            if not isdir(SRC_CACHE):
                makedirs(SRC_CACHE)
            try:
                # Download it to the build source cache. That way, you have
                # it.
                download_path = join(SRC_CACHE, d['filename'])
                if not isfile(download_path) or hashsum_file(download_path,
                                                             'md5') != d['md5']:
                    download(d['pypiurl'], join(SRC_CACHE, d['filename']))
                else:
                    print("Using cached download")
                print("Unpacking %s..." % package)
                unpack(join(SRC_CACHE, d['filename']), tempdir)
                print("done")
                print("working in %s" % tempdir)
                src_dir = get_dir(tempdir)
                # runs setup.py with the DISTUTILS_PATCH applied, which
                # writes pkginfo.yaml into tempdir
                run_setuppy(src_dir, tempdir, args)
                with open(join(tempdir, 'pkginfo.yaml')) as fn:
                    pkginfo = yaml.load(fn)
                setuptools_build = pkginfo['setuptools']
                setuptools_run = False
                # Look at the entry_points and construct console_script and
                # gui_scripts entry_points for conda
                entry_points = pkginfo['entry_points']
                if entry_points:
                    if isinstance(entry_points, str):
                        # makes sure it is left-shifted
                        newstr = "\n".join(x.strip()
                                           for x in entry_points.split('\n'))
                        config = configparser.ConfigParser()
                        entry_points = {}
                        try:
                            config.readfp(StringIO(newstr))
                        except Exception as err:
                            print("WARNING: entry-points not understood: ",
                                  err)
                            print("The string was", newstr)
                            entry_points = pkginfo['entry_points']
                        else:
                            setuptools_run = True
                            for section in config.sections():
                                if section in ['console_scripts', 'gui_scripts']:
                                    value = ['%s=%s' % (option, config.get(section, option))
                                             for option in config.options(section)]
                                    entry_points[section] = value
                    if not isinstance(entry_points, dict):
                        print("WARNING: Could not add entry points. They were:")
                        print(entry_points)
                    else:
                        cs = entry_points.get('console_scripts', [])
                        gs = entry_points.get('gui_scripts', [])
                        # We have *other* kinds of entry-points so we need
                        # setuptools at run-time
                        if not cs and not gs and len(entry_points) > 1:
                            setuptools_build = True
                            setuptools_run = True
                        entry_list = (
                            cs
                            # TODO: Use pythonw for these
                            + gs)
                        if len(cs + gs) != 0:
                            d['entry_points'] = indent.join([''] + entry_list)
                            d['entry_comment'] = ''
                            d['build_comment'] = ''
                            d['test_commands'] = indent.join([''] + make_entry_tests(entry_list))
                # Extract requested extra feature requirements...
                if args.all_extras:
                    extras_require = list(pkginfo['extras_require'].values())
                else:
                    try:
                        extras_require = [pkginfo['extras_require'][x] for x in extras]
                    except KeyError:
                        sys.exit("Error: Invalid extra features: [%s]"
                                 % ','.join(extras))
                #... and collect all needed requirement specs in a single list:
                requires = []
                for specs in [pkginfo['install_requires']] + extras_require:
                    if isinstance(specs, string_types):
                        requires.append(specs)
                    else:
                        requires.extend(specs)
                if requires or setuptools_build or setuptools_run:
                    deps = []
                    for deptext in requires:
                        # Every item may be a single requirement
                        # or a multiline requirements string...
                        for dep in deptext.split('\n'):
                            #... and may also contain comments...
                            dep = dep.split('#')[0].strip()
                            if dep:  #... and empty (or comment only) lines
                                spec = spec_from_line(dep)
                                if spec is None:
                                    sys.exit("Error: Could not parse: %s" % dep)
                                deps.append(spec)
                    if 'setuptools' in deps:
                        setuptools_build = False
                        setuptools_run = False
                        d['egg_comment'] = ''
                        d['build_comment'] = ''
                    # bool * list: prepends 'setuptools' only when needed
                    d['build_depends'] = indent.join([''] +
                                                     ['setuptools'] * setuptools_build +
                                                     deps)
                    d['run_depends'] = indent.join([''] +
                                                   ['setuptools'] * setuptools_run +
                                                   deps)
                    if args.recursive:
                        for dep in deps:
                            dep = dep.split()[0]
                            if not exists(join(output_dir, dep)):
                                args.packages.append(dep)
                if pkginfo['packages']:
                    deps = set(pkginfo['packages'])
                    if d['import_tests']:
                        olddeps = [x for x in d['import_tests'].split()
                                   if x != '-']
                        deps = set(olddeps) | deps
                    d['import_tests'] = indent.join([''] + sorted(deps))
                    d['import_comment'] = ''
            finally:
                rm_rf(tempdir)
    # All metadata collected; write one recipe directory per package.
    for package in package_dicts:
        d = package_dicts[package]
        makedirs(join(output_dir, package.lower()))
        print("Writing recipe for %s" % package.lower())
        with open(join(output_dir, package.lower(), 'meta.yaml'), 'w') as f:
            f.write(PYPI_META.format(**d))
        with open(join(output_dir, package.lower(), 'build.sh'), 'w') as f:
            f.write(PYPI_BUILD_SH.format(**d))
        with open(join(output_dir, package.lower(), 'bld.bat'), 'w') as f:
            f.write(PYPI_BLD_BAT.format(**d))
    print("Done")
def valid(name):
    """Return *name* if it is a legal, non-keyword Python identifier,
    otherwise return the empty string.
    """
    looks_like_identifier = re.match("[_A-Za-z][_a-zA-Z0-9]*$", name) is not None
    if looks_like_identifier and not keyword.iskeyword(name):
        return name
    return ''
def unpack(src_path, tempdir):
    """Extract the source archive *src_path* into *tempdir*.

    Tarballs and zip files are supported; anything else raises.
    """
    tar_suffixes = ('.tar.gz', '.tar.bz2', '.tgz', '.tar.xz', '.tar')
    if src_path.endswith(tar_suffixes):
        tar_xf(src_path, tempdir)
        return
    if src_path.endswith('.zip'):
        unzip(src_path, tempdir)
        return
    raise Exception("not a valid source")
def get_dir(tempdir):
    """Return the single unpacked source directory inside *tempdir*.

    Hidden entries and plain files are ignored; raises if there is not
    exactly one candidate directory.
    """
    candidates = [entry for entry in listdir(tempdir)
                  if not entry.startswith('.') and isdir(join(tempdir, entry))]
    if len(candidates) == 1:
        found = join(tempdir, candidates[0])
        if isdir(found):
            return found
    raise Exception("could not find unpacked source dir")
def run_setuppy(src_dir, temp_dir, args):
    '''
    Patch distutils and then run setup.py in a subprocess.

    The patch (DISTUTILS_PATCH) makes distutils.core.setup() dump the
    package's metadata to <temp_dir>/pkginfo.yaml as a side effect.

    :param src_dir: Directory containing the source code
    :type src_dir: str
    :param temp_dir: Temporary directory for storing pkginfo.yaml
    :type temp_dir: str
    '''
    # Do everything in the build env in case the setup.py install goes
    # haywire.
    # TODO: Try with another version of Python if this one fails. Some
    # packages are Python 2 or Python 3 only.
    create_env(config.build_prefix, ['python %s*' % args.python_version, 'pyyaml',
                                     'setuptools', 'numpy'], clear_cache=False)
    stdlib_dir = join(config.build_prefix, 'Lib' if sys.platform == 'win32' else
                      'lib/python%s' % args.python_version)
    patch = join(temp_dir, 'pypi-distutils.patch')
    with open(patch, 'w') as f:
        # double the backslashes so the path survives being embedded in
        # a Python string literal inside the patch (Windows paths)
        f.write(DISTUTILS_PATCH.format(temp_dir.replace('\\', '\\\\')))
    if exists(join(stdlib_dir, 'distutils', 'core.py-copy')):
        # a pristine copy from a previous run exists: restore it before
        # re-applying the patch
        rm_rf(join(stdlib_dir, 'distutils', 'core.py'))
        copy2(join(stdlib_dir, 'distutils', 'core.py-copy'), join(stdlib_dir, 'distutils', 'core.py'))
        # Avoid race conditions. Invalidate the cache.
        if PY3:
            rm_rf(join(stdlib_dir, 'distutils', '__pycache__',
                       'core.cpython-%s%s.pyc' % sys.version_info[:2]))
            rm_rf(join(stdlib_dir, 'distutils', '__pycache__',
                       'core.cpython-%s%s.pyo' % sys.version_info[:2]))
        else:
            rm_rf(join(stdlib_dir, 'distutils', 'core.pyc'))
            rm_rf(join(stdlib_dir, 'distutils', 'core.pyo'))
    else:
        # first run: keep a pristine copy so future runs can restore it
        copy2(join(stdlib_dir, 'distutils', 'core.py'), join(stdlib_dir,
                                                             'distutils', 'core.py-copy'))
    apply_patch(join(stdlib_dir, 'distutils'), patch)
    # Save PYTHONPATH for later
    env = os.environ.copy()
    if 'PYTHONPATH' in env:
        env[str('PYTHONPATH')] = str(src_dir + ':' + env['PYTHONPATH'])
    else:
        env[str('PYTHONPATH')] = str(src_dir)
    cwd = getcwd()
    chdir(src_dir)
    # NOTE(review): rebinding `args` shadows the function parameter from
    # here on; only args.python_version was needed above, so it works.
    args = [config.build_python, 'setup.py', 'install']
    try:
        subprocess.check_call(args, env=env)
    except subprocess.CalledProcessError:
        print('$PYTHONPATH = %s' % env['PYTHONPATH'])
        sys.exit('Error: command failed: %s' % ' '.join(args))
    finally:
        chdir(cwd)
def make_entry_tests(entry_list):
    """Build a '<name> --help' smoke-test command for each entry point.

    Each element of *entry_list* looks like 'name = module:function'; only
    the name before '=' is used.
    """
    return [spec.partition('=')[0].strip() + " --help" for spec in entry_list]
| |
"""
Identifier Class for ip-cidr-list-url
"""
import datetime
import radix
import time
import threading
from ...iso8601 import *
from ...jqfilter import *
from ...jsonval import *
from ...psjson import *
from ...psurl import *
# JSON Schema for this identifier's configuration data.
# BUG FIX: the original dict listed the "transform" key twice; the
# duplicate was silently discarded by Python, so only one is kept here.
data_validator = {
    "type": "object",
    "properties": {
        "source": { "$ref": "#/pScheduler/URL" },
        "transform": { "$ref": "#/pScheduler/JQTransformSpecification" },
        "bind": { "$ref": "#/pScheduler/Host" },
        "exclude": {
            "type": "array",
            "items": {
                "anyOf": [
                    { "$ref": "#/pScheduler/IPCIDR" },
                    { "$ref": "#/pScheduler/IPAddress" }
                ]
            }
        },
        "update": { "$ref": "#/pScheduler/Duration" },
        "retry": { "$ref": "#/pScheduler/Duration" },
        "fail-state": { "$ref": "#/pScheduler/Boolean" }
    },
    "additionalProperties": False,
    "required": [ "source", "update", "retry" ]
}
def data_is_valid(data):
    """Check to see if data is valid for this class.  Returns a tuple of
    (bool, string) indicating validity and any error message.
    """
    # json_validate already returns the (valid, error) pair; the original
    # routed it through a branch that returned the same tuple either way.
    return json_validate(data, data_validator)
class IdentifierIPCIDRListURL(object):
    """Class that holds and processes identifiers as lists of CIDRs
    fetched from a URL.

    The CIDR list is re-fetched lazily: each evaluate()/len() call may
    trigger a refresh once 'update' (or 'retry', after a failure) has
    elapsed.  All shared state (cidrs, length, next_attempt, updating)
    is guarded by self.data_lock.
    """

    def __len__(self):
        # Number of CIDRs currently loaded.
        with self.data_lock:
            return self.length

    def __populate_cidrs_update__(self):
        """
        Update the CIDR list.  It is assumed that the caller will have
        protected against calling two of these at once.
        """
        status, text = url_get(self.source, bind=self.bind,
                               json=False, throw=False)
        # Computed up front so both failure paths schedule the same retry.
        possible_next_attempt = datetime.datetime.now() + self.retry
        if status != 200:
            # TODO: Would be nice if we could log the failure
            with self.data_lock:
                self.next_attempt = possible_next_attempt
            return
        # If there's a transform, apply it.
        if self.transform is not None:
            try:
                json = json_load(text)
                text = self.transform(json)
            except (ValueError,
                    JQRuntimeError):
                # TODO: Would be nice if we could log the failure
                with self.data_lock:
                    self.next_attempt = possible_next_attempt
                return
        # TODO: Consider caching this on disk someplace so that it can
        # be retrieved if we fail to fetch at startup.
        # TODO: When threaded, hold this separately and swap old list
        # Build the replacement tree fully before swapping it in, so
        # readers never see a half-populated list.
        new_cidrs = radix.Radix()
        new_length = 0
        for cidr in text.split('\n'):
            # Remove comments and ditch excess whitespace
            cidr = cidr.split('#', 1)[0].strip()
            if len(cidr) == 0:
                continue
            try:
                new_cidrs.add(cidr)
                new_length += 1
            except ValueError:
                # Just ignore anything that looks fishy.
                # TODO: Log it?
                pass
        with self.data_lock:
            self.cidrs = new_cidrs
            self.length = new_length
            self.next_attempt = datetime.datetime.now() + self.update

    def __populate_cidrs__(self):
        # Refresh the list if due; cheap no-op otherwise.  The lock is
        # dropped during the actual fetch so readers are not blocked.
        with self.data_lock:
            if self.updating or self.next_attempt > datetime.datetime.now():
                # Not time yet or an update is already underway.
                return
            self.updating = True
        try:
            self.__populate_cidrs_update__()
        finally:
            with self.data_lock:
                self.updating = False

    def __init__(self,
                 data  # Data suitable for this class
                 ):
        valid, message = data_is_valid(data)
        if not valid:
            raise ValueError("Invalid data: %s" % message)
        self.source = data['source']
        self.bind = data.get('bind', None)
        # 'update' / 'retry' are ISO 8601 durations, e.g. "PT1H"
        self.update = iso8601_as_timedelta(data['update'])
        self.retry = iso8601_as_timedelta(data['retry'])
        # Value evaluate() returns while no list has been loaded yet.
        self.fail_state = data.get('fail-state', False)
        try:
            # This will raise a ValueError if it's wrong.
            transform = data["transform"]
            self.transform = JQFilter(transform["script"],
                                      transform.get("args", {} ),
                                      output_raw=True)
        except KeyError:
            self.transform = None
        self.exclusions = radix.Radix()
        if 'exclude' in data:
            try:
                for excl in data['exclude']:
                    self.exclusions.add(excl)
            except ValueError:
                raise ValueError("Invalid IP or CIDR '%s'" % excl)
        self.data_lock = threading.Lock()
        self.updating = False
        # TODO: Would be nice to support a timeout so the system
        # doesn't sit for too long.
        self.cidrs = radix.Radix()
        self.length = 0
        # Prime the timer with the epoch and do a first load of the list
        self.next_attempt = datetime.datetime.utcfromtimestamp(0)
        self.__populate_cidrs__()

    def evaluate(self,
                 hints  # Information used for doing identification
                 ):
        """Given a set of hints, evaluate this identifier and return True if
        an identification is made.
        """
        self.__populate_cidrs__()
        if self.length == 0:
            # Nothing loaded (e.g. first fetch failed); use configured default.
            return self.fail_state
        try:
            ip = hints['requester']
        except KeyError:
            return False
        try:
            prefix = self.cidrs.search_best(ip)
        except ValueError:
            raise ValueError("Invalid IP '%s'" % ip)
        if prefix is None:
            return False
        # Matched the list; excluded addresses still fail identification.
        return self.exclusions.search_best(ip) is None
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any, Dict, Union
from croniter import croniter
from flask_babel import gettext as _
from marshmallow import fields, Schema, validate, validates_schema
from marshmallow.validate import Length, Range, ValidationError
from marshmallow_enum import EnumField
from pytz import all_timezones
from superset.models.reports import (
ReportCreationMethodType,
ReportDataFormat,
ReportRecipientType,
ReportScheduleType,
ReportScheduleValidatorType,
)
# Overrides for the auto-generated OpenAPI spec of the report schedule API.
openapi_spec_methods_override = {
    "get": {"get": {"description": "Get a report schedule"}},
    "get_list": {
        "get": {
            "description": "Get a list of report schedules, use Rison or JSON "
            "query parameters for filtering, sorting,"
            " pagination and for selecting specific"
            " columns and metadata.",
        }
    },
    "post": {"post": {"description": "Create a report schedule"}},
    "put": {"put": {"description": "Update a report schedule"}},
    "delete": {"delete": {"description": "Delete a report schedule"}},
}

# Request schema for bulk delete: a list of report schedule ids.
get_delete_ids_schema = {"type": "array", "items": {"type": "integer"}}

# Shared field descriptions used by the POST and PUT schemas below.
type_description = "The report schedule type"
name_description = "The report schedule name."
description_description = "Use a nice description to give context to this Alert/Report"
context_markdown_description = "Markdown description"
crontab_description = (
    # Trailing space restored: the concatenated string previously read
    # "expression.[Crontab Guru]".
    "A CRON expression. "
    "[Crontab Guru](https://crontab.guru/) is "
    "a helpful resource that can help you craft a CRON expression."
)
timezone_description = "A timezone string that represents the location of the timezone."
sql_description = (
    "A SQL statement that defines whether the alert should get triggered or "
    "not. The query is expected to return either NULL or a number value."
)
owners_description = (
    "Owners are user ids allowed to delete or change this report. "
    "If left empty you will be one of the owners of the report."
)
validator_type_description = (
    "Determines when to trigger alert based off value from alert query. "
    "Alerts will be triggered with these validator types:\n"
    "- Not Null - When the return value is Not NULL, Empty, or 0\n"
    "- Operator - When `sql_return_value comparison_operator threshold`"
    " is True e.g. `50 <= 75`<br>Supports the comparison operators <, <=, "
    ">, >=, ==, and !="
)
validator_config_json_op_description = (
    "The operation to compare with a threshold to apply to the SQL output\n"
)
log_retention_description = "How long to keep the logs around for this report (in days)"
grace_period_description = (
    "Once an alert is triggered, how long, in seconds, before "
    "Superset nags you again."
)
working_timeout_description = (
    "If an alert is stalled in a working state, how long until its state is reset to"
    " error"
)
creation_method_description = (
    "Creation method is used to inform the frontend whether the report/alert was "
    "created in the dashboard, chart, or alerts and reports UI."
)
def validate_crontab(value: Union[bytes, bytearray, str]) -> None:
    """Raise ValidationError unless ``value`` is a valid cron expression."""
    expression = str(value)
    if croniter.is_valid(expression):
        return
    raise ValidationError("Cron expression is not valid")
class ValidatorConfigJSONSchema(Schema):
    """Alert validator configuration: a comparison operator and threshold."""

    op = fields.String( # pylint: disable=invalid-name
        description=validator_config_json_op_description,
        validate=validate.OneOf(choices=["<", "<=", ">", ">=", "==", "!="]),
    )
    threshold = fields.Float()
class ReportRecipientConfigJSONSchema(Schema):
    """Recipient-specific configuration (e.g. the delivery target)."""

    # TODO if email check validity
    target = fields.String()
class ReportRecipientSchema(Schema):
    """A single report recipient: delivery type plus its configuration."""

    type = fields.String(
        description="The recipient type, check spec for valid options",
        allow_none=False,
        required=True,
        validate=validate.OneOf(
            choices=tuple(key.value for key in ReportRecipientType)
        ),
    )
    recipient_config_json = fields.Nested(ReportRecipientConfigJSONSchema)
class ReportSchedulePostSchema(Schema):
    """Request schema for creating a report schedule (POST)."""

    type = fields.String(
        description=type_description,
        allow_none=False,
        required=True,
        validate=validate.OneOf(choices=tuple(key.value for key in ReportScheduleType)),
    )
    name = fields.String(
        description=name_description,
        allow_none=False,
        required=True,
        validate=[Length(1, 150)],
        example="Daily dashboard email",
    )
    description = fields.String(
        description=description_description,
        allow_none=True,
        required=False,
        example="Daily sales dashboard to marketing",
    )
    context_markdown = fields.String(
        description=context_markdown_description, allow_none=True, required=False
    )
    active = fields.Boolean()
    crontab = fields.String(
        description=crontab_description,
        validate=[validate_crontab, Length(1, 1000)],
        example="*/5 * * * *",
        allow_none=False,
        required=True,
    )
    timezone = fields.String(
        description=timezone_description,
        default="UTC",
        validate=validate.OneOf(choices=tuple(all_timezones)),
    )
    sql = fields.String(
        description=sql_description, example="SELECT value FROM time_series_table"
    )
    chart = fields.Integer(required=False, allow_none=True)
    creation_method = EnumField(
        ReportCreationMethodType,
        by_value=True,
        required=False,
        description=creation_method_description,
    )
    dashboard = fields.Integer(required=False, allow_none=True)
    database = fields.Integer(required=False)
    owners = fields.List(fields.Integer(description=owners_description))
    validator_type = fields.String(
        description=validator_type_description,
        validate=validate.OneOf(
            choices=tuple(key.value for key in ReportScheduleValidatorType)
        ),
    )
    validator_config_json = fields.Nested(ValidatorConfigJSONSchema)
    log_retention = fields.Integer(
        description=log_retention_description,
        example=90,
        validate=[Range(min=1, error=_("Value must be greater than 0"))],
    )
    grace_period = fields.Integer(
        description=grace_period_description,
        example=60 * 60 * 4,
        default=60 * 60 * 4,
        validate=[Range(min=1, error=_("Value must be greater than 0"))],
    )
    working_timeout = fields.Integer(
        description=working_timeout_description,
        example=60 * 60 * 1,
        default=60 * 60 * 1,
        validate=[Range(min=1, error=_("Value must be greater than 0"))],
    )
    recipients = fields.List(fields.Nested(ReportRecipientSchema))
    report_format = fields.String(
        default=ReportDataFormat.VISUALIZATION,
        validate=validate.OneOf(choices=tuple(key.value for key in ReportDataFormat)),
    )
    force_screenshot = fields.Boolean(default=False)

    @validates_schema
    def validate_report_references( # pylint: disable=unused-argument,no-self-use
        self, data: Dict[str, Any], **kwargs: Any
    ) -> None:
        """Reject a database reference when the schedule is a report."""
        if data["type"] == ReportScheduleType.REPORT:
            if "database" in data:
                raise ValidationError(
                    {"database": ["Database reference is not allowed on a report"]}
                )
class ReportSchedulePutSchema(Schema):
    """Request schema for updating a report schedule (PUT).

    Mirrors ReportSchedulePostSchema but every field is optional so a
    partial update is possible.
    """

    type = fields.String(
        description=type_description,
        required=False,
        validate=validate.OneOf(choices=tuple(key.value for key in ReportScheduleType)),
    )
    name = fields.String(
        description=name_description, required=False, validate=[Length(1, 150)]
    )
    description = fields.String(
        description=description_description,
        allow_none=True,
        required=False,
        example="Daily sales dashboard to marketing",
    )
    context_markdown = fields.String(
        description=context_markdown_description, allow_none=True, required=False
    )
    active = fields.Boolean(required=False)
    crontab = fields.String(
        description=crontab_description,
        validate=[validate_crontab, Length(1, 1000)],
        required=False,
    )
    timezone = fields.String(
        description=timezone_description,
        default="UTC",
        validate=validate.OneOf(choices=tuple(all_timezones)),
    )
    sql = fields.String(
        description=sql_description,
        example="SELECT value FROM time_series_table",
        required=False,
        allow_none=True,
    )
    chart = fields.Integer(required=False, allow_none=True)
    creation_method = EnumField(
        ReportCreationMethodType,
        by_value=True,
        allow_none=True,
        description=creation_method_description,
    )
    dashboard = fields.Integer(required=False, allow_none=True)
    database = fields.Integer(required=False)
    owners = fields.List(fields.Integer(description=owners_description), required=False)
    validator_type = fields.String(
        description=validator_type_description,
        validate=validate.OneOf(
            choices=tuple(key.value for key in ReportScheduleValidatorType)
        ),
        allow_none=True,
        required=False,
    )
    validator_config_json = fields.Nested(ValidatorConfigJSONSchema, required=False)
    log_retention = fields.Integer(
        description=log_retention_description,
        example=90,
        required=False,
        validate=[Range(min=1, error=_("Value must be greater than 0"))],
    )
    grace_period = fields.Integer(
        description=grace_period_description,
        example=60 * 60 * 4,
        required=False,
        validate=[Range(min=1, error=_("Value must be greater than 0"))],
    )
    working_timeout = fields.Integer(
        description=working_timeout_description,
        example=60 * 60 * 1,
        allow_none=True,
        required=False,
        validate=[Range(min=1, error=_("Value must be greater than 0"))],
    )
    recipients = fields.List(fields.Nested(ReportRecipientSchema), required=False)
    report_format = fields.String(
        default=ReportDataFormat.VISUALIZATION,
        validate=validate.OneOf(choices=tuple(key.value for key in ReportDataFormat)),
    )
    force_screenshot = fields.Boolean(default=False)
| |
#!python
"""Bootstrap distribute installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from distribute_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import sys
import time
import fnmatch
import tempfile
import tarfile
from distutils import log
# Define _python_cmd: run the current interpreter with the given argv
# and report success.  subprocess is preferred; the fallback covers
# interpreters that predate it (Python 2.3).
try:
    import subprocess
    def _python_cmd(*args):
        # Returns True when the spawned Python process exits with 0.
        args = (sys.executable,) + args
        return subprocess.call(args) == 0
except ImportError:
    # will be used for python 2.3
    def _python_cmd(*args):
        args = (sys.executable,) + args
        # quoting arguments if windows
        if sys.platform == 'win32':
            def quote(arg):
                if ' ' in arg:
                    return '"%s"' % arg
                return arg
            args = [quote(arg) for arg in args]
        return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
# Version of distribute installed by default, and where to fetch it from.
DEFAULT_VERSION = "0.6.14"
DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
# Version advertised by the fake setuptools metadata written when a real
# setuptools installation is neutralized (see _fake_setuptools).
SETUPTOOLS_FAKED_VERSION = "0.6c11"
SETUPTOOLS_PKG_INFO = """\
Metadata-Version: 1.0
Name: setuptools
Version: %s
Summary: xxxx
Home-page: xxx
Author: xxx
Author-email: xxx
License: xxx
Description: xxx
""" % SETUPTOOLS_FAKED_VERSION
def _install(tarball):
    """Extract the distribute tarball and run its 'setup.py install'."""
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()

        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)

        # installing
        log.warn('Installing Distribute')
        if not _python_cmd('setup.py', 'install'):
            log.warn('Something went wrong during the installation.')
            log.warn('See the error message above.')
    finally:
        # Restore the original working directory no matter what.
        os.chdir(old_wd)
def _build_egg(egg, tarball, to_dir):
    """Extract `tarball` and build a distribute egg into `to_dir`.

    Raises IOError if the expected egg file was not produced.
    """
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()

        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)

        # building an egg
        log.warn('Building a Distribute egg in %s', to_dir)
        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
    finally:
        os.chdir(old_wd)
    # returning the result
    log.warn(egg)
    if not os.path.exists(egg):
        raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
    """Fetch/build the distribute egg (if missing) and activate it.

    Prepends the egg to sys.path and records it on
    setuptools.bootstrap_install_from so a later setup() installs it.
    """
    egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
                       % (version, sys.version_info[0], sys.version_info[1]))
    if not os.path.exists(egg):
        tarball = download_setuptools(version, download_base,
                                      to_dir, download_delay)
        _build_egg(egg, tarball, to_dir)
    sys.path.insert(0, egg)
    import setuptools
    setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                   to_dir=os.curdir, download_delay=15, no_fake=True):
    """Ensure a distribute of at least `version` is importable.

    Downloads and activates an egg when needed.  When a plain
    setuptools (without the pkg_resources._distribute marker) is found
    and no_fake is False, it is first neutralized with fake metadata.
    May call sys.exit(2) when an older distribute is already imported
    and cannot be replaced in-process.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    was_imported = 'pkg_resources' in sys.modules or \
        'setuptools' in sys.modules
    try:
        try:
            import pkg_resources
            if not hasattr(pkg_resources, '_distribute'):
                # Genuine setuptools, not distribute: force the download path.
                if not no_fake:
                    _fake_setuptools()
                raise ImportError
        except ImportError:
            return _do_download(version, download_base, to_dir, download_delay)
        try:
            pkg_resources.require("distribute>="+version)
            return
        except pkg_resources.VersionConflict:
            e = sys.exc_info()[1]
            if was_imported:
                sys.stderr.write(
                "The required version of distribute (>=%s) is not available,\n"
                "and can't be installed while this script is running. Please\n"
                "install a more recent version first, using\n"
                "'easy_install -U distribute'."
                "\n\n(Currently using %r)\n" % (version, e.args[0]))
                sys.exit(2)
            else:
                # Drop the stale module so the downloaded egg can take over.
                del pkg_resources, sys.modules['pkg_resources'] # reload ok
                return _do_download(version, download_base, to_dir,
                                    download_delay)
        except pkg_resources.DistributionNotFound:
            return _do_download(version, download_base, to_dir,
                                download_delay)
    finally:
        if not no_fake:
            _create_fake_setuptools_pkg_info(to_dir)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                        to_dir=os.curdir, delay=15):
    """Download distribute from a specified location and return its filename

    `version` should be a valid distribute version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download
    attempt.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    # Python 3 moved urlopen; fall back to the Python 2 location.
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    tgz_name = "distribute-%s.tar.gz" % version
    url = download_base + tgz_name
    saveto = os.path.join(to_dir, tgz_name)
    src = dst = None
    if not os.path.exists(saveto):  # Avoid repeated downloads
        try:
            log.warn("Downloading %s", url)
            src = urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = src.read()
            dst = open(saveto, "wb")
            dst.write(data)
        finally:
            if src:
                src.close()
            if dst:
                dst.close()
    return os.path.realpath(saveto)
def _no_sandbox(function):
    """Decorator that disables the setuptools DirectorySandbox, if active.

    Temporarily replaces DirectorySandbox._violation with a no-op so the
    wrapped function may write outside the sandbox; the original handler
    is restored on exit.
    """
    def __no_sandbox(*args, **kw):
        try:
            from setuptools.sandbox import DirectorySandbox
            if not hasattr(DirectorySandbox, '_old'):
                def violation(*args):
                    pass
                DirectorySandbox._old = DirectorySandbox._violation
                DirectorySandbox._violation = violation
                patched = True
            else:
                # Another wrapper already patched it; do not patch twice.
                patched = False
        except ImportError:
            patched = False
        try:
            return function(*args, **kw)
        finally:
            if patched:
                # Restore the original handler exactly once.
                DirectorySandbox._violation = DirectorySandbox._old
                del DirectorySandbox._old
    return __no_sandbox
def _patch_file(path, content):
    """Will backup the file then patch it"""
    existing_content = open(path).read()
    if existing_content == content:
        # already patched
        log.warn('Already patched.')
        return False
    log.warn('Patching...')
    # Keep the original around as a timestamped .OLD file.
    _rename_path(path)
    f = open(path, 'w')
    try:
        f.write(content)
    finally:
        f.close()
    return True

# Writing outside the build tree requires the sandbox to be disabled.
_patch_file = _no_sandbox(_patch_file)
def _same_content(path, content):
return open(path).read() == content
def _rename_path(path):
    """Move `path` aside to a timestamped '.OLD' name and return that name."""
    backup = '%s.OLD.%s' % (path, time.time())
    log.warn('Renaming %s into %s', path, backup)
    os.rename(path, backup)
    return backup
def _remove_flat_installation(placeholder):
    """Neutralize a flat (non-egg) setuptools install under `placeholder`.

    Overwrites its egg-info with fake metadata and renames the
    setuptools modules out of the way.  Returns True when the install
    was patched and moved.
    """
    if not os.path.isdir(placeholder):
        log.warn('Unkown installation at %s', placeholder)
        return False
    found = False
    for file in os.listdir(placeholder):
        if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
            found = True
            break
    if not found:
        log.warn('Could not locate setuptools*.egg-info')
        return
    log.warn('Removing elements out of the way...')
    pkg_info = os.path.join(placeholder, file)
    # egg-info may be a directory (newer layout) or a single file.
    if os.path.isdir(pkg_info):
        patched = _patch_egg_dir(pkg_info)
    else:
        patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
    if not patched:
        log.warn('%s already patched.', pkg_info)
        return False
    # now let's move the files out of the way
    for element in ('setuptools', 'pkg_resources.py', 'site.py'):
        element = os.path.join(placeholder, element)
        if os.path.exists(element):
            _rename_path(element)
        else:
            log.warn('Could not find the %s element of the '
                     'Setuptools distribution', element)
    return True

_remove_flat_installation = _no_sandbox(_remove_flat_installation)
def _after_install(dist):
    """Post-install hook: drop fake setuptools metadata in site-packages."""
    log.warn('After install bootstrap.')
    placeholder = dist.get_command_obj('install').install_purelib
    _create_fake_setuptools_pkg_info(placeholder)
def _create_fake_setuptools_pkg_info(placeholder):
    """Write fake setuptools egg-info plus a .pth file into `placeholder`.

    Makes pkg_resources believe setuptools SETUPTOOLS_FAKED_VERSION is
    installed so easy_install does not try to fetch the real thing.
    """
    if not placeholder or not os.path.exists(placeholder):
        log.warn('Could not find the install location')
        return
    pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
    setuptools_file = 'setuptools-%s-py%s.egg-info' % \
            (SETUPTOOLS_FAKED_VERSION, pyver)
    pkg_info = os.path.join(placeholder, setuptools_file)
    if os.path.exists(pkg_info):
        log.warn('%s already exists', pkg_info)
        return
    log.warn('Creating %s', pkg_info)
    f = open(pkg_info, 'w')
    try:
        f.write(SETUPTOOLS_PKG_INFO)
    finally:
        f.close()
    # The .pth file points the import system at the faked egg-info entry.
    pth_file = os.path.join(placeholder, 'setuptools.pth')
    log.warn('Creating %s', pth_file)
    f = open(pth_file, 'w')
    try:
        f.write(os.path.join(os.curdir, setuptools_file))
    finally:
        f.close()

_create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info)
def _patch_egg_dir(path):
    """Replace the egg directory at `path` with fake setuptools metadata.

    Returns False when it already contains the fake PKG-INFO, True after
    patching.
    """
    # let's check if it's already patched
    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
    if os.path.exists(pkg_info):
        if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
            log.warn('%s already patched.', pkg_info)
            return False
    # Move the real egg aside, then rebuild a minimal EGG-INFO skeleton.
    _rename_path(path)
    os.mkdir(path)
    os.mkdir(os.path.join(path, 'EGG-INFO'))
    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
    f = open(pkg_info, 'w')
    try:
        f.write(SETUPTOOLS_PKG_INFO)
    finally:
        f.close()
    return True

_patch_egg_dir = _no_sandbox(_patch_egg_dir)
def _before_install():
    """Pre-install hook: neutralize any existing setuptools install."""
    log.warn('Before install bootstrap.')
    _fake_setuptools()
def _under_prefix(location):
if 'install' not in sys.argv:
return True
args = sys.argv[sys.argv.index('install')+1:]
for index, arg in enumerate(args):
for option in ('--root', '--prefix'):
if arg.startswith('%s=' % option):
top_dir = arg.split('root=')[-1]
return location.startswith(top_dir)
elif arg == option:
if len(args) > index:
top_dir = args[index+1]
return location.startswith(top_dir)
if arg == '--user' and USER_SITE is not None:
return location.startswith(USER_SITE)
return True
def _fake_setuptools():
    """Locate an installed setuptools and replace it with fake metadata.

    No-op when setuptools is absent, already patched, or being installed
    under an unrelated --root/--prefix.  Relaunches the process after a
    successful patch.
    """
    log.warn('Scanning installed packages')
    try:
        import pkg_resources
    except ImportError:
        # we're cool
        log.warn('Setuptools or Distribute does not seem to be installed.')
        return
    ws = pkg_resources.working_set
    try:
        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools',
                                  replacement=False))
    except TypeError:
        # old distribute API
        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))
    if setuptools_dist is None:
        log.warn('No setuptools distribution found')
        return
    # detecting if it was already faked
    setuptools_location = setuptools_dist.location
    log.warn('Setuptools installation detected at %s', setuptools_location)
    # if --root or --prefix was provided, and if
    # setuptools is not located in them, we don't patch it
    if not _under_prefix(setuptools_location):
        log.warn('Not patching, --root or --prefix is installing Distribute'
                 ' in another location')
        return
    # let's see if its an egg
    if not setuptools_location.endswith('.egg'):
        log.warn('Non-egg installation')
        res = _remove_flat_installation(setuptools_location)
        if not res:
            return
    else:
        log.warn('Egg installation')
        pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
        if (os.path.exists(pkg_info) and
            _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
            log.warn('Already patched.')
            return
        log.warn('Patching...')
        # let's create a fake egg replacing setuptools one
        res = _patch_egg_dir(setuptools_location)
        if not res:
            return
    log.warn('Patched done.')
    _relaunch()
def _relaunch():
    """Re-run the original command line under the current interpreter.

    Exits the current process with the child's return code.
    """
    log.warn('Relaunching...')
    # we have to relaunch the process
    # pip marker to avoid a relaunch bug
    # NOTE(review): matching sys.argv[:3] against pip's '-c install ...'
    # invocation looks fragile -- confirm against the pip versions targeted.
    if sys.argv[:3] == ['-c', 'install', '--single-version-externally-managed']:
        sys.argv[0] = 'setup.py'
    args = [sys.executable] + sys.argv
    sys.exit(subprocess.call(args))
def _extractall(self, path=".", members=None):
    """Extract all members from the archive to the current working
    directory and set owner, modification time and permissions on
    directories afterwards. `path' specifies a different directory
    to extract to. `members' is optional and must be a subset of the
    list returned by getmembers().
    """
    # Backport of TarFile.extractall for old tarfile modules; `self`
    # is the TarFile instance.
    import copy
    import operator
    from tarfile import ExtractError
    directories = []
    if members is None:
        members = self
    for tarinfo in members:
        if tarinfo.isdir():
            # Extract directories with a safe mode.
            directories.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 448 # decimal for oct 0700
        self.extract(tarinfo, path)
    # Reverse sort directories.
    if sys.version_info < (2, 4):
        # Python 2.3: sort with a comparison function (cmp existed then).
        def sorter(dir1, dir2):
            return cmp(dir1.name, dir2.name)
        directories.sort(sorter)
        directories.reverse()
    else:
        directories.sort(key=operator.attrgetter('name'), reverse=True)
    # Set correct owner, mtime and filemode on directories.
    for tarinfo in directories:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError:
            e = sys.exc_info()[1]
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall.

    `argv` is accepted for command-line compatibility but unused.
    Fix: `version` was previously ignored; it is now forwarded to
    download_setuptools (default behavior is unchanged).
    """
    tarball = download_setuptools(version=version)
    _install(tarball)

if __name__ == '__main__':
    main(sys.argv[1:])
| |
import bz2
import zlib
from datetime import datetime
import pika
from pika.spec import Basic, BasicProperties
from mock import patch, sentinel
from nose_parameterized import parameterized
from pikachewie.data import Properties
from pikachewie.message import Message
from tests import _BaseTestCase, unittest
# Dotted path of the module under test; used to build patch() targets.
mod = 'pikachewie.message'
class DescribeMessage(_BaseTestCase):
    """Verify Message exposes its frames and delegated attributes."""

    __contexts__ = (
        ('_adapter_connect',
         patch('pika.connection.Connection._adapter_connect')),
        ('_flush_outbound',
         patch('pika.connection.Connection._flush_outbound')),
    )
    # Attributes Message delegates to the Basic.Deliver method frame.
    method_delegates = '''
        consumer_tag
        delivery_tag
        exchange
        redelivered
        routing_key
        '''.split()
    # Attributes Message delegates to the BasicProperties header frame.
    properties_delegates = '''
        app_id
        content_encoding
        content_type
        correlation_id
        expiration
        headers
        message_id
        priority
        reply_to
        timestamp
        type
        user_id
        '''.split()

    def configure(self):
        # Fixture values, one per delegated attribute.
        self.consumer_tag = 'R2'
        self.delivery_tag = '1138'
        self.exchange = 'droids'
        self.redelivered = True
        self.routing_key = 'dejarik'
        self.app_id = '3PO'
        self.content_encoding = 'GZIP'
        self.content_type = 'application/JSON'
        self.correlation_id = 'dockingbay94'
        self.expiration = '60000' # 1 minute TTL
        self.headers = {'tags': ['best practices']}
        self.message_id = '1234'
        self.priority = 9
        self.reply_to = 'reply_to_address'
        self.timestamp = '1234567890'
        self.type = 'message_type_name'
        self.user_id = 'amqp_user_id'
        self.channel = pika.channel.Channel(pika.connection.Connection(),
                                            channel_number=1)
        self.method = Basic.Deliver(**dict([(x, getattr(self, x)) for x in
                                            self.method_delegates]))
        self.body = '{"strategy": "Let the Wookie win."}'
        self.header = BasicProperties(**dict([(x, getattr(self, x)) for x in
                                              self.properties_delegates]))

    def execute(self):
        self.message = Message(self.channel, self.method, self.header,
                               self.body)

    def should_have_channel(self):
        self.assertIs(self.message.channel, self.channel)

    def should_have_method_frame(self):
        self.assertIs(self.message.method, self.method)

    def should_have_properties(self):
        # these multiple assertions are needed because headers are deepcopied
        self.assertIsInstance(self.message.properties, Properties)
        self.assertIs(self.message.properties.app_id, self.app_id)
        self.assertEqual(self.message.properties.headers, self.headers)

    def should_have_body(self):
        self.assertEqual(self.message.body, self.body)

    def should_have_consumer_tag(self):
        self.assertEqual(self.message.consumer_tag, self.consumer_tag)

    def should_have_delivery_tag(self):
        self.assertEqual(self.message.delivery_tag, self.delivery_tag)

    def should_have_exchange(self):
        self.assertEqual(self.message.exchange, self.exchange)

    def should_have_redelivered(self):
        self.assertEqual(self.message.redelivered, self.redelivered)

    def should_have_routing_key(self):
        self.assertEqual(self.message.routing_key, self.routing_key)

    def should_have_app_id(self):
        self.assertEqual(self.message.app_id, self.app_id)

    def should_have_correlation_id(self):
        self.assertEqual(self.message.correlation_id, self.correlation_id)

    def should_have_headers(self):
        self.assertEqual(self.message.headers, self.headers)

    def should_have_id(self):
        self.assertEqual(self.message.id, self.message_id)

    def should_have_priority(self):
        self.assertEqual(self.message.priority, self.priority)

    def should_have_reply_to(self):
        self.assertEqual(self.message.reply_to, self.reply_to)

    def should_have_type(self):
        self.assertEqual(self.message.type, self.type)

    def should_have_user_id(self):
        self.assertEqual(self.message.user_id, self.user_id)

    def should_have_lowercase_content_encoding(self):
        self.assertEqual(self.message.content_encoding, 'gzip')

    def should_have_lowercase_content_type(self):
        self.assertEqual(self.message.content_type, 'application/json')

    def should_have_created_at(self):
        # timestamp 1234567890 rendered as 2009-02-13 18:31:30 -- this
        # appears to assume a fixed local timezone; verify before porting.
        self.assertEqual(self.message.created_at,
                         datetime(2009, 2, 13, 18, 31, 30))

    def should_have_expires_at(self):
        # created_at plus the 60000 ms expiration.
        self.assertEqual(self.message.expires_at,
                         datetime(2009, 2, 13, 18, 32, 30))
class WhenMessageCreatedAtUndefined(_BaseTestCase):
    """created_at is None when the header carries no timestamp."""

    __contexts__ = (
        ('_adapter_connect',
         patch('pika.connection.Connection._adapter_connect')),
    )

    def configure(self):
        self.header = BasicProperties()
        self.message = Message(sentinel.channel, sentinel.method, self.header,
                               sentinel.body)

    def execute(self):
        self.created_at = self.message.created_at

    def should_return_none(self):
        self.assertIsNone(self.created_at)
class WhenMessageExpiresAtUndefinedDueToCreatedAt(_BaseTestCase):
    """expires_at is None when an expiration is set but no timestamp."""

    __contexts__ = (
        ('_adapter_connect',
         patch('pika.connection.Connection._adapter_connect')),
    )

    def configure(self):
        self.header = BasicProperties(expiration='60000')
        self.message = Message(sentinel.channel, sentinel.method, self.header,
                               sentinel.body)

    def execute(self):
        self.expires_at = self.message.expires_at

    def should_return_none(self):
        self.assertIsNone(self.expires_at)
class WhenMessageExpiresAtUndefinedDueToExpiration(_BaseTestCase):
    """expires_at is None when a timestamp is set but no expiration."""

    __contexts__ = (
        ('_adapter_connect',
         patch('pika.connection.Connection._adapter_connect')),
    )

    def configure(self):
        self.header = BasicProperties(timestamp='1234567890')
        self.message = Message(sentinel.channel, sentinel.method, self.header,
                               sentinel.body)

    def execute(self):
        self.expires_at = self.message.expires_at

    def should_return_none(self):
        self.assertIsNone(self.expires_at)
class WhenMessageDoesNotExpire(_BaseTestCase):
    """is_expired is False for a message with no TTL information."""

    __contexts__ = (
        ('_adapter_connect',
         patch('pika.connection.Connection._adapter_connect')),
    )

    def configure(self):
        self.header = BasicProperties()
        self.message = Message(sentinel.channel, sentinel.method, self.header,
                               sentinel.body)

    def execute(self):
        self.is_expired = self.message.is_expired

    def should_not_be_expired(self):
        self.assertIs(self.is_expired, False)
class WhenMessageHasNotExpired(_BaseTestCase):
    """is_expired is False one second before the expiry instant."""

    __contexts__ = (
        ('_adapter_connect',
         patch('pika.connection.Connection._adapter_connect')),
        ('datetime', patch(mod + '.datetime', wraps=datetime)),
    )

    def configure(self):
        # 18:32:29 is one second before the 60000 ms TTL elapses.
        self.ctx.datetime.now.return_value = datetime(2009, 2, 13, 18, 32, 29)
        self.header = BasicProperties(timestamp='1234567890',
                                      expiration='60000')
        self.message = Message(sentinel.channel, sentinel.method, self.header,
                               sentinel.body)

    def execute(self):
        self.is_expired = self.message.is_expired

    def should_not_be_expired(self):
        self.assertIs(self.is_expired, False)
class WhenMessageHasExpired(_BaseTestCase):
    """is_expired is True once now() reaches the expiry instant."""

    __contexts__ = (
        ('_adapter_connect',
         patch('pika.connection.Connection._adapter_connect')),
        ('datetime', patch(mod + '.datetime', wraps=datetime)),
    )

    def configure(self):
        # 18:32:30 is exactly when the 60000 ms TTL elapses.
        self.ctx.datetime.now.return_value = datetime(2009, 2, 13, 18, 32, 30)
        self.header = BasicProperties(timestamp='1234567890',
                                      expiration='60000')
        self.message = Message(sentinel.channel, sentinel.method, self.header,
                               sentinel.body)

    def execute(self):
        self.is_expired = self.message.is_expired

    # Renamed from should_not_be_expired: the old name (copy-pasted from
    # WhenMessageHasNotExpired) contradicted the `is True` assertion.
    def should_be_expired(self):
        self.assertIs(self.is_expired, True)
@patch('pika.connection.Connection._adapter_connect')
class WhenGettingPayload(unittest.TestCase):
    """Message.payload decodes/deserializes according to the headers."""

    body = '{"strategy": "Let the Wookie win."}'

    @parameterized.expand((
        ('plain text', {}, body, body),
        ('bzip2', {'content_encoding': 'bzip2'}, bz2.compress(body), body),
        ('gzip', {'content_encoding': 'gzip'}, zlib.compress(body), body),
        ('json', {'content_type': 'application/json'}, body,
         {'strategy': 'Let the Wookie win.'}),
    ))
    def should_return_expected_payload(self, name, headers, body, expected):
        # NOTE(review): bz2/zlib.compress on a str only works on Python 2;
        # this module appears to target py2 -- confirm before porting.
        self.header = BasicProperties(**headers)
        message = Message(sentinel.channel, sentinel.method, self.header, body)
        self.assertEqual(message.payload, expected)
| |
# electoral_district/controllers.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .models import ElectoralDistrict, ElectoralDistrictManager
import wevote_functions.admin
from wevote_functions.functions import positive_value_exists, convert_to_int, extract_state_from_ocd_division_id, \
extract_district_id_label_when_district_id_exists_from_ocd_id
import xml.etree.ElementTree as ElementTree
from exception.models import handle_exception
from exception.models import handle_record_found_more_than_one_exception
logger = wevote_functions.admin.get_logger(__name__)
def electoral_districts_import_from_sample_file(filename):
    """
    Parse a local VIP XML file and create/update electoral district entries.

    :param filename: path to the XML file to load
    :return: results from electoral_district_import_from_xml_data
    """
    # Load saved xml from local file
    logger.info("Loading electoral_districts from local XML file")
    xml_tree = ElementTree.parse(filename)
    xml_root = xml_tree.getroot()
    electoral_district_item_list = []
    # Fix: `if xml_root:` relied on Element truthiness, which is False for
    # an element with no children (and deprecated); compare against None.
    if xml_root is not None:
        # Look for ElectoralDistrict and create the Master table first.
        # ElectoralDistrict is the direct child node of vipObject
        electoral_district_item_list = xml_root.findall('ElectoralDistrict')
        # number_of_electoral_districts = len(electoral_district_item_list)
    return electoral_district_import_from_xml_data(electoral_district_item_list)
def electoral_district_import_from_xml_data(electoral_district_xml_data):
    """
    Create or update ElectoralDistrict entries from parsed XML element data.

    :param electoral_district_xml_data: iterable of ElectoralDistrict XML
        elements (e.g. the result of ElementTree.findall)
    :return: dict with keys 'success', 'status', 'saved', 'updated' and
        'not_processed'
    """
    electoral_district_not_processed = 0
    electoral_district_saved = 0
    electoral_district_updated = 0
    success = False
    status = ''
    electoral_district_manager = ElectoralDistrictManager()
    # Set to a positive number to cap how many rows are processed (dev only)
    limit_for_testing = 0
    for one_electoral_district in electoral_district_xml_data:
        total_count = electoral_district_saved + electoral_district_not_processed + electoral_district_updated
        if positive_value_exists(limit_for_testing) and total_count >= limit_for_testing:
            # This limitation is used when we are doing development and testing
            break
        # Reset per-district values on every iteration
        electoral_district_number = None
        electoral_district_other_type = ''
        ocd_id_external_id = ''
        electoral_district_type = ''
        # Bug fix: state_code must always be defined -- previously it was only
        # assigned inside the ocd-id branch, so building updated_values below
        # raised NameError for districts without an ocd-id identifier.
        state_code = ''
        duplicate_entry = 0
        ctcl_id_temp = one_electoral_district.attrib['id']
        electoral_district_name = one_electoral_district.find('Name').text
        electoral_district_type_found = one_electoral_district.find('Type')
        if electoral_district_type_found is not None:
            electoral_district_type = electoral_district_type_found.text
            # TODO validate electoral_district_type from electoralDistrictType enum
        electoral_district_number_found = one_electoral_district.find('Number')
        if electoral_district_number_found is not None:
            electoral_district_number = convert_to_int(electoral_district_number_found.text)
        electoral_district_other_type_found = one_electoral_district.find('OtherType')
        if electoral_district_other_type_found is not None:
            electoral_district_other_type = electoral_district_other_type_found.text
        # Look for the value of the 'ocd-id' typed entry under
        # ExternalIdentifiers. Bug fix: findall() always returns a list
        # (never None), so the old "is not None" test was always true and the
        # repeated find(...).text crashed with AttributeError whenever no
        # ocd-id element existed. Reuse the findall result instead.
        external_identifiers_list = one_electoral_district.findall(
            "./ExternalIdentifiers/ExternalIdentifier/[Type='ocd-id']/Value")
        if external_identifiers_list:
            ocd_id_external_id = external_identifiers_list[0].text
        # Pull state_code from ocdDivisionId
        if positive_value_exists(ocd_id_external_id):
            state_code = extract_state_from_ocd_division_id(ocd_id_external_id)
            if not positive_value_exists(state_code):
                district_code = extract_district_id_label_when_district_id_exists_from_ocd_id(ocd_id_external_id)
                # Bug fix: str.lower() returns a new string; the old code
                # discarded the result, so an upper-case 'DC' never matched.
                district_code = district_code.lower()
                # check if it is District of Columbia (DC). DC doesn't have state substring in ocd_id
                if district_code == 'dc':
                    state_code = 'dc'
                else:
                    state_code = ''
            # Always store state_code in lower case
            if state_code:
                state_code = state_code.lower()
        # Make sure we have the minimum required variables
        if not positive_value_exists(ctcl_id_temp) or not positive_value_exists(electoral_district_name):
            electoral_district_not_processed += 1
            continue
        # check if this is a duplicate entry
        try:
            # TODO check what constitutes a UNIQUE record in the table
            electoral_district_query = ElectoralDistrict.objects.order_by('id')
            if electoral_district_query:
                electoral_district_query = electoral_district_query.filter(
                    ctcl_id_temp=ctcl_id_temp, electoral_district_name=electoral_district_name)
                # TODO currently update is not handled. Based on what constitutes a unique row, handle update
                if electoral_district_query.count() > 0:
                    duplicate_entry = 1
                    electoral_district_not_processed += 1
            if duplicate_entry > 0:
                # This entry already exists, skip update_or_create. set success to True
                success = True
                status += "ELECTORAL_DISTRICT_ENTRY_EXISTS "
            else:
                try:
                    updated_values = {
                        'electoral_district_type': electoral_district_type,
                        'electoral_district_number': electoral_district_number,
                        'electoral_district_other_type': electoral_district_other_type,
                        'ocd_id_external_id': ocd_id_external_id,
                        'state_code': state_code
                    }
                    results = electoral_district_manager.update_or_create_electoral_district(
                        ctcl_id_temp,
                        electoral_district_name,
                        updated_values)
                    if not results:
                        electoral_district_not_processed += 1
                        success = False
                    elif results['new_electoral_district_created']:
                        electoral_district_saved += 1
                        success = True
                        status += "ELECTORAL_DISTRICT_IMPORT_PROCESS_CREATED "
                    else:
                        electoral_district_updated += 1
                        success = True
                        status += "ELECTORAL_DISTRICT_IMPORT_PROCESS_UPDATED "
                except Exception as e:
                    status += 'FAILED update_or_create_electoral_district. ' \
                              '{error} [type: {error_type}]'.format(error=e, error_type=type(e))
                    handle_exception(e, logger=logger, exception_message=status)
                    success = False
        except ElectoralDistrict.DoesNotExist:
            pass
    electoral_district_results = {
        'success': success,
        'status': status,
        'saved': electoral_district_saved,
        'updated': electoral_district_updated,
        'not_processed': electoral_district_not_processed,
    }
    return electoral_district_results
def retrieve_electoral_district(ctcl_id_temp):
    """
    Retrieve one ElectoralDistrict by its temporary CTCL id.

    :param ctcl_id_temp: CTCL id stored on the electoral district record
    :return: dict with 'electoral_district_found', 'state_code_found',
        'electoral_district' (an ElectoralDistrict instance; an empty
        placeholder when not found), 'state_code' and 'status'
    """
    # Fix: removed the dead "results = ''" initializer, and return the
    # status string that was previously assigned but silently discarded.
    status = ''
    state_code = ''
    state_code_found = False
    electoral_district_found = False
    try:
        electoral_district_query = ElectoralDistrict.objects.using('readonly').all()
        electoral_district_item = electoral_district_query.get(ctcl_id_temp=ctcl_id_temp)
        electoral_district_found = True
        state_code = electoral_district_item.state_code
        if positive_value_exists(state_code):
            state_code_found = True
    except ElectoralDistrict.MultipleObjectsReturned as e:
        # More than one row matched; log it and return an empty placeholder
        electoral_district_item = ElectoralDistrict()
        handle_record_found_more_than_one_exception(e, logger)
        status = "ERROR_MORE_THAN_ONE_ELECTORAL_DISTRICT_FOUND"
    except ElectoralDistrict.DoesNotExist:
        # Not found: return an empty placeholder object
        electoral_district_item = ElectoralDistrict()
    results = {
        'electoral_district_found': electoral_district_found,
        'state_code_found': state_code_found,
        'electoral_district': electoral_district_item,
        'state_code': state_code,
        'status': status,
    }
    return results
# NOTE(review): these four getters take "self" but are defined at module
# level, not inside a class -- they look like ElectoralDistrict model methods
# that were accidentally left outside the class body; confirm and relocate.
def get_electoral_district_number(self):
    """Return the electoral_district_number attribute of the given record."""
    return self.electoral_district_number
def get_electoral_district_name(self):
    """Return the electoral_district_name attribute of the given record."""
    return self.electoral_district_name
def get_electoral_district_other_type(self):
    """Return the electoral_district_other_type attribute of the given record."""
    return self.electoral_district_other_type
def get_electoral_district_type(self):
    """Return the electoral_district_type attribute of the given record."""
    return self.electoral_district_type
| |
import argparse
import os
from joblib import Parallel, delayed
import numpy as np
import autosklearn
import autosklearn.data
import autosklearn.data.competition_data_manager
from autosklearn.pipeline.classification import SimpleClassificationPipeline
# Command-line interface: a directory containing the dataset, and an output
# directory where the .predict files are written at the bottom of the script.
parser = argparse.ArgumentParser()
parser.add_argument('input')
parser.add_argument('output')
args = parser.parse_args()
# Fix: bind to "input_dir" instead of "input" to avoid shadowing the builtin
input_dir = args.input
dataset = 'helena'
output = args.output
path = os.path.join(input_dir, dataset)
# Load the competition-format dataset (train / valid / test splits)
D = autosklearn.data.competition_data_manager.CompetitionDataManager(path)
X = D.data['X_train']
y = D.data['Y_train']
X_valid = D.data['X_valid']
X_test = D.data['X_test']
# Auto-generated ensemble: a list of (weight, configured pipeline) pairs.
# To update the model, replace this array with a newly generated ensemble.
choices = \
[(0.220000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'adaboost',
'classifier:adaboost:algorithm': 'SAMME.R',
'classifier:adaboost:learning_rate': 0.12736378214916136,
'classifier:adaboost:max_depth': 2,
'classifier:adaboost:n_estimators': 102,
'imputation:strategy': 'mean',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'no_preprocessing',
'rescaling:__choice__': 'min/max'})),
(0.140000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'liblinear_svc',
'classifier:liblinear_svc:C': 34.52330718740001,
'classifier:liblinear_svc:dual': 'False',
'classifier:liblinear_svc:fit_intercept': 'True',
'classifier:liblinear_svc:intercept_scaling': 1,
'classifier:liblinear_svc:loss': 'squared_hinge',
'classifier:liblinear_svc:multi_class': 'ovr',
'classifier:liblinear_svc:penalty': 'l2',
'classifier:liblinear_svc:tol': 0.010305332230700001,
'imputation:strategy': 'most_frequent',
'one_hot_encoding:minimum_fraction': 0.00012464201046600006,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'polynomial',
'preprocessor:polynomial:degree': 2,
'preprocessor:polynomial:include_bias': 'True',
'preprocessor:polynomial:interaction_only': 'False',
'rescaling:__choice__': 'none'})),
(0.080000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'True',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 1.1473936812138448,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 11,
'classifier:random_forest:min_samples_split': 10,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'most_frequent',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'fast_ica',
'preprocessor:fast_ica:algorithm': 'parallel',
'preprocessor:fast_ica:fun': 'logcosh',
'preprocessor:fast_ica:n_components': 945,
'preprocessor:fast_ica:whiten': 'True',
'rescaling:__choice__': 'none'})),
(0.080000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'none',
'classifier:__choice__': 'qda',
'classifier:qda:reg_param': 1.3455409527727558,
'imputation:strategy': 'most_frequent',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'pca',
'preprocessor:pca:keep_variance': 0.7598172817638718,
'preprocessor:pca:whiten': 'False',
'rescaling:__choice__': 'standardize'})),
(0.060000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'none',
'classifier:__choice__': 'qda',
'classifier:qda:reg_param': 7.873556221817867,
'imputation:strategy': 'median',
'one_hot_encoding:minimum_fraction': 0.007384474684230516,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'select_rates',
'preprocessor:select_rates:alpha': 0.44352666713957484,
'preprocessor:select_rates:mode': 'fdr',
'preprocessor:select_rates:score_func': 'f_classif',
'rescaling:__choice__': 'normalize'})),
(0.040000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'none',
'classifier:__choice__': 'lda',
'classifier:lda:n_components': 12,
'classifier:lda:shrinkage': 'manual',
'classifier:lda:shrinkage_factor': 0.9016175646665451,
'classifier:lda:tol': 0.0001716207118446579,
'imputation:strategy': 'median',
'one_hot_encoding:minimum_fraction': 0.009728842857612658,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'fast_ica',
'preprocessor:fast_ica:algorithm': 'parallel',
'preprocessor:fast_ica:fun': 'logcosh',
'preprocessor:fast_ica:n_components': 914,
'preprocessor:fast_ica:whiten': 'True',
'rescaling:__choice__': 'standardize'})),
(0.040000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'none',
'classifier:__choice__': 'qda',
'classifier:qda:reg_param': 4.213462678722325,
'imputation:strategy': 'median',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'select_rates',
'preprocessor:select_rates:alpha': 0.020501216047798837,
'preprocessor:select_rates:mode': 'fdr',
'preprocessor:select_rates:score_func': 'f_classif',
'rescaling:__choice__': 'standardize'})),
(0.040000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'qda',
'classifier:qda:reg_param': 4.367371232039595,
'imputation:strategy': 'median',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'select_rates',
'preprocessor:select_rates:alpha': 0.01303718715506049,
'preprocessor:select_rates:mode': 'fpr',
'preprocessor:select_rates:score_func': 'chi2',
'rescaling:__choice__': 'none'})),
(0.040000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'none',
'classifier:__choice__': 'qda',
'classifier:qda:reg_param': 7.286051530772571,
'imputation:strategy': 'median',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'select_rates',
'preprocessor:select_rates:alpha': 0.026747542179073727,
'preprocessor:select_rates:mode': 'fwe',
'preprocessor:select_rates:score_func': 'chi2',
'rescaling:__choice__': 'min/max'})),
(0.040000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'none',
'classifier:__choice__': 'qda',
'classifier:qda:reg_param': 7.907981363846062,
'imputation:strategy': 'most_frequent',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'select_rates',
'preprocessor:select_rates:alpha': 0.38925641117203025,
'preprocessor:select_rates:mode': 'fdr',
'preprocessor:select_rates:score_func': 'f_classif',
'rescaling:__choice__': 'none'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'none',
'classifier:__choice__': 'qda',
'classifier:qda:reg_param': 7.873556221817867,
'imputation:strategy': 'median',
'one_hot_encoding:minimum_fraction': 0.007384474684230516,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'liblinear_svc_preprocessor',
'preprocessor:liblinear_svc_preprocessor:C': 709.0694499917347,
'preprocessor:liblinear_svc_preprocessor:dual': 'False',
'preprocessor:liblinear_svc_preprocessor:fit_intercept': 'True',
'preprocessor:liblinear_svc_preprocessor:intercept_scaling': 1,
'preprocessor:liblinear_svc_preprocessor:loss': 'squared_hinge',
'preprocessor:liblinear_svc_preprocessor:multi_class': 'ovr',
'preprocessor:liblinear_svc_preprocessor:penalty': 'l1',
'preprocessor:liblinear_svc_preprocessor:tol': 0.013228763477510586,
'rescaling:__choice__': 'standardize'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'extra_trees',
'classifier:extra_trees:bootstrap': 'False',
'classifier:extra_trees:criterion': 'gini',
'classifier:extra_trees:max_depth': 'None',
'classifier:extra_trees:max_features': 3.0174234734498917,
'classifier:extra_trees:min_samples_leaf': 2,
'classifier:extra_trees:min_samples_split': 12,
'classifier:extra_trees:min_weight_fraction_leaf': 0.0,
'classifier:extra_trees:n_estimators': 100,
'imputation:strategy': 'mean',
'one_hot_encoding:minimum_fraction': 0.007553789957243724,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'liblinear_svc_preprocessor',
'preprocessor:liblinear_svc_preprocessor:C': 31.20787569423215,
'preprocessor:liblinear_svc_preprocessor:dual': 'False',
'preprocessor:liblinear_svc_preprocessor:fit_intercept': 'True',
'preprocessor:liblinear_svc_preprocessor:intercept_scaling': 1,
'preprocessor:liblinear_svc_preprocessor:loss': 'squared_hinge',
'preprocessor:liblinear_svc_preprocessor:multi_class': 'ovr',
'preprocessor:liblinear_svc_preprocessor:penalty': 'l1',
'preprocessor:liblinear_svc_preprocessor:tol': 1.7149340429765088e-05,
'rescaling:__choice__': 'min/max'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'extra_trees',
'classifier:extra_trees:bootstrap': 'False',
'classifier:extra_trees:criterion': 'gini',
'classifier:extra_trees:max_depth': 'None',
'classifier:extra_trees:max_features': 4.592027482980136,
'classifier:extra_trees:min_samples_leaf': 12,
'classifier:extra_trees:min_samples_split': 12,
'classifier:extra_trees:min_weight_fraction_leaf': 0.0,
'classifier:extra_trees:n_estimators': 100,
'imputation:strategy': 'median',
'one_hot_encoding:minimum_fraction': 0.003355962206220629,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'liblinear_svc_preprocessor',
'preprocessor:liblinear_svc_preprocessor:C': 0.14162959993684351,
'preprocessor:liblinear_svc_preprocessor:dual': 'False',
'preprocessor:liblinear_svc_preprocessor:fit_intercept': 'True',
'preprocessor:liblinear_svc_preprocessor:intercept_scaling': 1,
'preprocessor:liblinear_svc_preprocessor:loss': 'squared_hinge',
'preprocessor:liblinear_svc_preprocessor:multi_class': 'ovr',
'preprocessor:liblinear_svc_preprocessor:penalty': 'l1',
'preprocessor:liblinear_svc_preprocessor:tol': 0.009394425053603682,
'rescaling:__choice__': 'standardize'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'extra_trees',
'classifier:extra_trees:bootstrap': 'False',
'classifier:extra_trees:criterion': 'gini',
'classifier:extra_trees:max_depth': 'None',
'classifier:extra_trees:max_features': 2.655307919311661,
'classifier:extra_trees:min_samples_leaf': 2,
'classifier:extra_trees:min_samples_split': 16,
'classifier:extra_trees:min_weight_fraction_leaf': 0.0,
'classifier:extra_trees:n_estimators': 100,
'imputation:strategy': 'median',
'one_hot_encoding:minimum_fraction': 0.00019806605573813597,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'liblinear_svc_preprocessor',
'preprocessor:liblinear_svc_preprocessor:C': 18.30206355212093,
'preprocessor:liblinear_svc_preprocessor:dual': 'False',
'preprocessor:liblinear_svc_preprocessor:fit_intercept': 'True',
'preprocessor:liblinear_svc_preprocessor:intercept_scaling': 1,
'preprocessor:liblinear_svc_preprocessor:loss': 'squared_hinge',
'preprocessor:liblinear_svc_preprocessor:multi_class': 'ovr',
'preprocessor:liblinear_svc_preprocessor:penalty': 'l1',
'preprocessor:liblinear_svc_preprocessor:tol': 3.267407083806816e-05,
'rescaling:__choice__': 'normalize'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'none',
'classifier:__choice__': 'qda',
'classifier:qda:reg_param': 1.2325644317889806,
'imputation:strategy': 'most_frequent',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'select_rates',
'preprocessor:select_rates:alpha': 0.01303718715506049,
'preprocessor:select_rates:mode': 'fpr',
'preprocessor:select_rates:score_func': 'chi2',
'rescaling:__choice__': 'standardize'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'qda',
'classifier:qda:reg_param': 7.286051530772571,
'imputation:strategy': 'mean',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'extra_trees_preproc_for_classification',
'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'False',
'preprocessor:extra_trees_preproc_for_classification:criterion': 'entropy',
'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',
'preprocessor:extra_trees_preproc_for_classification:max_features': 1.3440864854665975,
'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf': 8,
'preprocessor:extra_trees_preproc_for_classification:min_samples_split': 17,
'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf': 0.0,
'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,
'rescaling:__choice__': 'standardize'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'none',
'classifier:__choice__': 'qda',
'classifier:qda:reg_param': 8.519756045823158,
'imputation:strategy': 'mean',
'one_hot_encoding:minimum_fraction': 0.08901572125739037,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'extra_trees_preproc_for_classification',
'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'False',
'preprocessor:extra_trees_preproc_for_classification:criterion': 'entropy',
'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',
'preprocessor:extra_trees_preproc_for_classification:max_features': 3.842249530515841,
'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf': 13,
'preprocessor:extra_trees_preproc_for_classification:min_samples_split': 10,
'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf': 0.0,
'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,
'rescaling:__choice__': 'none'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'extra_trees',
'classifier:extra_trees:bootstrap': 'False',
'classifier:extra_trees:criterion': 'gini',
'classifier:extra_trees:max_depth': 'None',
'classifier:extra_trees:max_features': 3.7289920990557777,
'classifier:extra_trees:min_samples_leaf': 3,
'classifier:extra_trees:min_samples_split': 13,
'classifier:extra_trees:min_weight_fraction_leaf': 0.0,
'classifier:extra_trees:n_estimators': 100,
'imputation:strategy': 'median',
'one_hot_encoding:minimum_fraction': 0.00037734441447340595,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'liblinear_svc_preprocessor',
'preprocessor:liblinear_svc_preprocessor:C': 0.6186775496832956,
'preprocessor:liblinear_svc_preprocessor:dual': 'False',
'preprocessor:liblinear_svc_preprocessor:fit_intercept': 'True',
'preprocessor:liblinear_svc_preprocessor:intercept_scaling': 1,
'preprocessor:liblinear_svc_preprocessor:loss': 'squared_hinge',
'preprocessor:liblinear_svc_preprocessor:multi_class': 'ovr',
'preprocessor:liblinear_svc_preprocessor:penalty': 'l1',
'preprocessor:liblinear_svc_preprocessor:tol': 1.710156140413348e-05,
'rescaling:__choice__': 'standardize'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'extra_trees',
'classifier:extra_trees:bootstrap': 'False',
'classifier:extra_trees:criterion': 'gini',
'classifier:extra_trees:max_depth': 'None',
'classifier:extra_trees:max_features': 4.430190032276566,
'classifier:extra_trees:min_samples_leaf': 5,
'classifier:extra_trees:min_samples_split': 9,
'classifier:extra_trees:min_weight_fraction_leaf': 0.0,
'classifier:extra_trees:n_estimators': 100,
'imputation:strategy': 'mean',
'one_hot_encoding:minimum_fraction': 0.0027303638882864483,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'liblinear_svc_preprocessor',
'preprocessor:liblinear_svc_preprocessor:C': 30343.867455246524,
'preprocessor:liblinear_svc_preprocessor:dual': 'False',
'preprocessor:liblinear_svc_preprocessor:fit_intercept': 'True',
'preprocessor:liblinear_svc_preprocessor:intercept_scaling': 1,
'preprocessor:liblinear_svc_preprocessor:loss': 'squared_hinge',
'preprocessor:liblinear_svc_preprocessor:multi_class': 'ovr',
'preprocessor:liblinear_svc_preprocessor:penalty': 'l1',
'preprocessor:liblinear_svc_preprocessor:tol': 0.005743178077382402,
'rescaling:__choice__': 'standardize'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'False',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 1.8440666453536427,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 1,
'classifier:random_forest:min_samples_split': 14,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'most_frequent',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'liblinear_svc_preprocessor',
'preprocessor:liblinear_svc_preprocessor:C': 28279.093774727116,
'preprocessor:liblinear_svc_preprocessor:dual': 'False',
'preprocessor:liblinear_svc_preprocessor:fit_intercept': 'True',
'preprocessor:liblinear_svc_preprocessor:intercept_scaling': 1,
'preprocessor:liblinear_svc_preprocessor:loss': 'squared_hinge',
'preprocessor:liblinear_svc_preprocessor:multi_class': 'ovr',
'preprocessor:liblinear_svc_preprocessor:penalty': 'l1',
'preprocessor:liblinear_svc_preprocessor:tol': 0.0010803540483296555,
'rescaling:__choice__': 'min/max'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'none',
'classifier:__choice__': 'qda',
'classifier:qda:reg_param': 1.3455409527727558,
'imputation:strategy': 'most_frequent',
'one_hot_encoding:minimum_fraction': 0.04805977625874754,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'extra_trees_preproc_for_classification',
'preprocessor:extra_trees_preproc_for_classification:bootstrap': 'False',
'preprocessor:extra_trees_preproc_for_classification:criterion': 'entropy',
'preprocessor:extra_trees_preproc_for_classification:max_depth': 'None',
'preprocessor:extra_trees_preproc_for_classification:max_features': 3.6600607240096594,
'preprocessor:extra_trees_preproc_for_classification:min_samples_leaf': 18,
'preprocessor:extra_trees_preproc_for_classification:min_samples_split': 18,
'preprocessor:extra_trees_preproc_for_classification:min_weight_fraction_leaf': 0.0,
'preprocessor:extra_trees_preproc_for_classification:n_estimators': 100,
'rescaling:__choice__': 'none'})),
]
# Accumulators for the weighted per-member predictions.
# NOTE(review): "targets" and "predictions" are never used below -- only
# predictions_valid and predictions_test are filled in; presumably leftovers
# from the generating template, confirm and remove.
targets = []
predictions = []
predictions_valid = []
predictions_test = []
def fit_and_predict(estimator, weight, X, y):
    """Fit one ensemble member on (X, y) and return its weighted probability
    predictions on the global X_valid and X_test splits as (pv, pt).

    On any failure the error and the member's configuration are printed and
    (None, None) is returned, effectively dropping the member.
    """
    try:
        # .copy() keeps each parallel worker from mutating the shared arrays
        estimator.fit(X.copy(), y.copy())
        pv = estimator.predict_proba(X_valid.copy()) * weight
        pt = estimator.predict_proba(X_test.copy()) * weight
    except Exception as e:
        print(e)
        print(estimator.configuration)
        pv = None
        pt = None
    return pv, pt
# Fit all ensemble members in parallel; each returns weighted probability
# predictions for the valid and test splits, or (None, None) on failure.
all_predictions = Parallel(n_jobs=-1)(
    delayed(fit_and_predict)(estimator, weight, X, y)
    for weight, estimator in choices)
for pv, pt in all_predictions:
    # Bug fix: skip members that failed to fit -- a None entry would turn
    # the stacked array into dtype=object and break np.sum below.
    if pv is not None and pt is not None:
        predictions_valid.append(pv)
        predictions_test.append(pt)
# Sum the weighted member predictions and write one file per split.
# (Loop variable renamed from "predictions" to avoid shadowing the
# module-level list of the same name.)
for name, split_predictions in [('valid', predictions_valid),
                                ('test', predictions_test)]:
    stacked = np.array(split_predictions)
    ensemble = np.sum(stacked, axis=0).astype(np.float32)
    filepath = os.path.join(output, '%s_%s_000.predict' % (dataset, name))
    np.savetxt(filepath, ensemble, delimiter=' ', fmt='%.4e')
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import bson
import datetime
import json
import logging
import multiprocessing
import re
import string
import sys
import colander as col
from ming.orm import session, ThreadLocalORMSession
from pylons import tmpl_context as c, app_globals as g
from allura import model as M
from allura.lib import helpers as h
from allura.lib.plugin import ProjectRegistrationProvider
log = logging.getLogger(__name__)
class TroveCategory():
    """Colander-style type resolving a trove category path (or name) string
    to an M.TroveCategory restricted to a given root branch."""
    def __init__(self, root_type=''):
        # e.g. 'Topic', 'License'; compared against the category's fullpath
        self.root_type = root_type
    def deserialize(self, node, cstruct):
        # Pass the colander "no value" marker straight through
        if cstruct is col.null:
            return col.null
        # Prefer an exact fullpath match; fall back to a fullname lookup
        cat = M.TroveCategory.query.get(fullpath=cstruct)
        if not cat:
            cat = M.TroveCategory.query.get(fullname=cstruct)
            if not cat:
                raise col.Invalid(node,
                    '"%s" is not a valid trove category.' % cstruct)
            # NOTE(review): this root_type check only runs when the fullpath
            # lookup missed -- a category found directly by fullpath is never
            # validated against root_type; confirm whether that is intended.
            if not cat.fullpath.startswith(self.root_type):
                raise col.Invalid(node,
                    '"%s" is not a valid "%s" trove category.' %
                    (cstruct, self.root_type))
        return cat
class User():
    """Colander-style type resolving a username string to an Allura User."""
    def deserialize(self, node, cstruct):
        # Pass the colander "no value" marker straight through
        if cstruct is col.null:
            return col.null
        resolved = M.User.by_username(cstruct)
        if resolved:
            return resolved
        raise col.Invalid(node,
                          'Invalid username "%s".' % cstruct)
class ProjectName(object):
    """Value object pairing a project's display name with its url shortname."""
    def __init__(self, name, shortname):
        # Keep both forms: "name" for display, "shortname" for registration
        self.name, self.shortname = name, shortname
class ProjectNameType():
    """Colander-style type turning a display name into a ProjectName whose
    shortname is a lowercased, dash-separated, alphanumeric-only form."""
    def deserialize(self, node, cstruct):
        if cstruct is col.null:
            return col.null
        # Strip everything but alphanumerics and spaces, lowercase,
        # then turn the remaining spaces into dashes.
        cleaned = re.sub("[^A-Za-z0-9 ]", "", cstruct).lower()
        dashed = re.sub(" ", "-", cleaned)
        return ProjectName(cstruct, dashed)
class ProjectShortnameType():
    """Validate a project shortname through the registration provider.

    When updating an existing project the allowed-name check is skipped.
    """
    def __init__(self, nbhd, update):
        self.nbhd = nbhd
        self.update = update
    def deserialize(self, node, cstruct):
        if cstruct is col.null:
            return col.null
        provider = ProjectRegistrationProvider.get()
        return provider.shortname_validator.to_python(
            cstruct,
            check_allowed=not self.update,
            neighborhood=self.nbhd)
class Award():
    """Resolve an award identifier to an Award created by the neighborhood.

    Looks up by short name first, then falls back to interpreting the
    value as a bson ObjectId.
    """
    def __init__(self, nbhd):
        self.nbhd = nbhd
    def deserialize(self, node, cstruct):
        if cstruct is col.null:
            return col.null
        by_short = dict(short=cstruct,
                        created_by_neighborhood_id=self.nbhd._id)
        award = M.Award.query.find(by_short).first()
        if not award:
            # try to look up the award by _id
            by_id = dict(_id=bson.ObjectId(cstruct),
                         created_by_neighborhood_id=self.nbhd._id)
            award = M.Award.query.find(by_id).first()
        if not award:
            raise col.Invalid(node,
                              'Invalid award "%s".' % cstruct)
        return award
# One SequenceSchema per trove taxonomy branch: each accepts a list of
# values deserialized through TroveCategory rooted at the named branch.
class TroveTopics(col.SequenceSchema):
    trove_topics = col.SchemaNode(TroveCategory("Topic"))
class TroveLicenses(col.SequenceSchema):
    trove_license = col.SchemaNode(TroveCategory("License"))
class TroveDatabases(col.SequenceSchema):
    trove_databases = col.SchemaNode(TroveCategory("Database Environment"))
class TroveStatuses(col.SequenceSchema):
    trove_statuses = col.SchemaNode(TroveCategory("Development Status"))
class TroveAudiences(col.SequenceSchema):
    trove_audience = col.SchemaNode(TroveCategory("Intended Audience"))
class TroveOSes(col.SequenceSchema):
    trove_oses = col.SchemaNode(TroveCategory("Operating System"))
class TroveLanguages(col.SequenceSchema):
    trove_languages = col.SchemaNode(TroveCategory("Programming Language"))
class TroveTranslations(col.SequenceSchema):
    trove_translations = col.SchemaNode(TroveCategory("Translations"))
class TroveUIs(col.SequenceSchema):
    trove_uis = col.SchemaNode(TroveCategory("User Interface"))
# Free-form string labels (not validated against the trove taxonomy)
class Labels(col.SequenceSchema):
    label = col.SchemaNode(col.Str())
class Project(col.MappingSchema):
    """Colander schema describing one project entry in the import data.

    Optional fields default to '' / None / {} via "missing" so that
    create_project can tell "not supplied" apart from real values.
    """
    name = col.SchemaNode(ProjectNameType())
    summary = col.SchemaNode(col.Str(), missing='')
    description = col.SchemaNode(col.Str(), missing='')
    admin = col.SchemaNode(User())
    private = col.SchemaNode(col.Bool(), missing=False)
    labels = Labels(missing=[])
    external_homepage = col.SchemaNode(col.Str(), missing='')
    # Trove category lists; several enforce a maximum number of selections
    trove_root_databases = TroveDatabases(missing=None)
    trove_developmentstatuses = TroveStatuses(validator=col.Length(max=6), missing=None)
    trove_audiences = TroveAudiences(validator=col.Length(max=6), missing=None)
    trove_licenses = TroveLicenses(validator=col.Length(max=6), missing=None)
    trove_oses = TroveOSes(missing=None)
    trove_languages = TroveLanguages(validator=col.Length(max=6), missing=None)
    trove_topics = TroveTopics(validator=col.Length(max=3), missing=None)
    trove_natlanguages = TroveTranslations(missing=None)
    trove_environments = TroveUIs(missing=None)
    tool_data = col.SchemaNode(col.Mapping(unknown='preserve'), missing={})
    icon = col.SchemaNode(col.Str(), missing=None)
    # more fields are added dynamically to the schema in main()
def valid_shortname(project):
    """Colander validator for a project entry's shortname.

    Returns True when acceptable, otherwise an error-message string
    (colander treats any non-True return as a validation failure).
    """
    if project.shortname:
        # An explicit shortname was already validated by ProjectShortnameType
        return True
    derived = project.name.shortname
    if 3 <= len(derived) <= 15:
        return True
    return ('Project shortname "%s" must be between 3 and 15 characters'
            % derived)
class Projects(col.SequenceSchema):
    # Each project entry must additionally pass the shortname length check
    project = Project(validator=col.Function(valid_shortname))
class Object(object):
    """Lightweight attribute bag: exposes the keys of *d* as attributes."""
    def __init__(self, d):
        for key, value in d.items():
            setattr(self, key, value)
def trove_ids(orig, new_):
    """Return the de-duplicated list of _ids from *new_*, or *orig*
    unchanged when no new trove values were supplied (new_ is None)."""
    if new_ is None:
        return orig
    unique = {t._id for t in new_}
    return list(unique)
def create_project(p, nbhd, options):
    """
    Create or update a single project in neighborhood ``nbhd``.

    @param p: Object wrapping one validated project record
    @param nbhd: destination M.Neighborhood
    @param options: parsed command-line options (update, ensure_tools, ...)
    @return: 0 on success or skip; callers treat non-zero as fatal
    """
    worker_name = multiprocessing.current_process().name
    # Suppress artifact mod_date updates while bulk-editing.
    M.session.artifact_orm_session._get().skip_mod_date = True
    shortname = p.shortname or p.name.shortname
    project = M.Project.query.get(shortname=shortname,
                                  neighborhood_id=nbhd._id)
    project_template = nbhd.get_project_template()
    if project and not (options.update and p.shortname):
        # Existing projects are only touched when --update was given along
        # with an explicit shortname in the record.
        log.warning('[%s] Skipping existing project "%s". To update an existing '
                    'project you must provide the project shortname and run '
                    'this script with --update.' % (worker_name, shortname))
        return 0
    if not project:
        log.info('[%s] Creating project "%s".' % (worker_name, shortname))
        try:
            project = nbhd.register_project(shortname,
                                            p.admin,
                                            project_name=p.name.name,
                                            private_project=p.private)
        except Exception, e:
            # Registration failure skips this record but does not abort
            # the whole worker (returns 0).
            log.exception('[%s] %s' % (worker_name, str(e)))
            return 0
    else:
        log.info('[%s] Updating project "%s".' % (worker_name, shortname))
    # Avoid sending notification emails for every change made below.
    project.notifications_disabled = True
    if options.ensure_tools and 'tools' in project_template:
        for i, tool in enumerate(project_template['tools'].iterkeys()):
            tool_config = project_template['tools'][tool]
            if project.app_instance(tool_config['mount_point']):
                # Tool already installed at this mount point.
                continue
            tool_options = tool_config.get('options', {})
            for k, v in tool_options.iteritems():
                if isinstance(v, basestring):
                    # Expand $-placeholders in string options.
                    # NOTE(review): __dict__.get('root_project', {}) looks
                    # odd -- safe_substitute expects a mapping; confirm the
                    # intended substitution source.
                    tool_options[k] = string.Template(v).safe_substitute(
                        project.root_project.__dict__.get('root_project', {}))
            project.install_app(tool,
                                mount_label=tool_config['label'],
                                mount_point=tool_config['mount_point'],
                                **tool_options)
    project.summary = p.summary
    project.short_description = p.description
    project.external_homepage = p.external_homepage
    project.last_updated = datetime.datetime.utcnow()
    # These properties may have been populated by nbhd template defaults in
    # register_project(). Overwrite if we have data, otherwise keep defaults.
    project.labels = p.labels or project.labels
    project.trove_root_database = trove_ids(project.trove_root_database, p.trove_root_databases)
    project.trove_developmentstatus = trove_ids(project.trove_developmentstatus, p.trove_developmentstatuses)
    project.trove_audience = trove_ids(project.trove_audience, p.trove_audiences)
    project.trove_license = trove_ids(project.trove_license, p.trove_licenses)
    project.trove_os = trove_ids(project.trove_os, p.trove_oses)
    project.trove_language = trove_ids(project.trove_language, p.trove_languages)
    project.trove_topic = trove_ids(project.trove_topic, p.trove_topics)
    project.trove_natlanguage = trove_ids(project.trove_natlanguage, p.trove_natlanguages)
    project.trove_environment = trove_ids(project.trove_environment, p.trove_environments)
    project.tool_data.update(p.tool_data)
    for a in p.awards:
        M.AwardGrant(app_config_id=bson.ObjectId(),
                     award_id=a._id,
                     granted_to_project_id=project._id,
                     granted_by_neighborhood_id=nbhd._id)
    if p.icon:
        with open(p.icon) as icon_file:
            project.save_icon(p.icon, icon_file)
    project.notifications_disabled = False
    with h.push_config(c, project=project, user=p.admin):
        ThreadLocalORMSession.flush_all()
        g.post_event('project_updated')
    # Drop cached ORM state so long-running workers don't accumulate it.
    session(project).clear()
    return 0
def create_projects(projects, nbhd, options):
    """Worker entry point: import each record in ``projects`` into ``nbhd``.

    Exits the worker process with the first non-zero status returned by
    create_project(), which the parent reads back via Process.exitcode.
    """
    for record in projects:
        status = create_project(Object(record), nbhd, options)
        if status != 0:
            sys.exit(status)
def main(options):
    """
    Validate all JSON records, then import them using a pool of worker
    processes.

    @return: 0 on success, an error string for a bad neighborhood, a
        worker's non-zero exit code on failure, or None with
        --validate-only. The value is passed to sys.exit() by the caller.
    """
    log.addHandler(logging.StreamHandler(sys.stdout))
    log.setLevel(getattr(logging, options.log_level.upper()))
    log.debug(options)
    nbhd = M.Neighborhood.query.get(name=options.neighborhood)
    if not nbhd:
        return 'Invalid neighborhood "%s".' % options.neighborhood
    data = json.load(open(options.file, 'r'))
    # dynamically add to the schema (e.g. if needs nbhd)
    project = Project()
    project.add(col.SchemaNode(col.Sequence(),
                               col.SchemaNode(Award(nbhd)),
                               name='awards', missing=[]))
    project.add(col.SchemaNode(ProjectShortnameType(nbhd, options.update),
                               name='shortname', missing=None))
    projects = []
    for datum in data:
        try:
            projects.append(project.deserialize(datum))
        except Exception:
            # With --validate-only, log (with traceback) and keep checking
            # the remaining records; otherwise abort on the first bad one.
            keep_going = options.validate_only
            log.error('Error on %s\n%s', datum['shortname'], datum, exc_info=keep_going)
            if not keep_going:
                raise
    log.debug(projects)
    if options.validate_only:
        return
    # Round-robin the validated records across nprocs worker processes.
    chunks = [projects[i::options.nprocs] for i in range(options.nprocs)]
    jobs = []
    for i in range(options.nprocs):
        p = multiprocessing.Process(target=create_projects,
                                    args=(chunks[i], nbhd, options), name='worker-' + str(i + 1))
        jobs.append(p)
        p.start()
    for j in jobs:
        j.join()
        # Propagate the first worker failure as our own exit status.
        if j.exitcode != 0:
            return j.exitcode
    return 0
def parse_options():
    """Define and evaluate this script's command-line interface."""
    import argparse

    cli = argparse.ArgumentParser(
        description='Import Allura project(s) from JSON file')
    # Positional arguments (order is part of the interface).
    cli.add_argument('file', metavar='JSON_FILE', type=str,
                     help='Path to JSON file containing project data.')
    cli.add_argument('neighborhood', metavar='NEIGHBORHOOD', type=str,
                     help='Destination Neighborhood shortname.')
    # Optional flags.
    cli.add_argument('--log', dest='log_level', default='INFO',
                     help='Log level (DEBUG, INFO, WARNING, ERROR, CRITICAL).')
    cli.add_argument('--update', dest='update', default=False,
                     action='store_true',
                     help='Update existing projects. Without this option, existing '
                          'projects will be skipped.')
    cli.add_argument('--ensure-tools', dest='ensure_tools', default=False,
                     action='store_true',
                     help='Check nbhd project template for default tools, and install '
                          'them on the project(s) if not already installed.')
    cli.add_argument('--nprocs', '-n', action='store', dest='nprocs', type=int,
                     help='Number of processes to divide the work among.',
                     default=multiprocessing.cpu_count())
    cli.add_argument('--validate-only', '-v', action='store_true',
                     dest='validate_only',
                     help='Validate ALL records, make no changes')
    return cli.parse_args()
# Script entry point: main()'s return value (0, worker exit code, or an
# error string) becomes the process exit status/message.
if __name__ == '__main__':
    sys.exit(main(parse_options()))
| |
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Brief blurb on XidlimitsTests:
#
# As a consequence of disabling autovacuum, age of template0 database
# (or any other database having datallowconn='f') keeps on increasing.
# It eventually crosses xid_warn_limit, xid_stop_limit and finally
# wrap limit. A new database created using template0 as template
# inherits this age. These tests are intended to exercise procedures
# to rectify the age of such a newly created database when it has
# crossed the above mentioned limits (hence the name XidlimitsTests).
# The tests also ensure that autovacuum daemon is indeed disabled on
# master and segments.
from os.path import abspath, dirname, isdir
import sys
import unittest2 as unittest
from tinctest import logger
from tinctest.lib import Gpdiff, local_path
from mpp.lib.PSQL import PSQL, PSQLException
from mpp.gpdb.lib.models.sql.template import SQLTemplateTestCase
from mpp.models import MPPTestCase
import tinctest.lib.gpplatform as gpplatform
from gppylib.commands.base import Command, ExecutionError
from gppylib.db import dbconn
from gppylib.gparray import GpArray
# XIDs at or below this value are reserved for special use; "normal"
# transaction ids start above it.
FirstNormalTransactionId = 3

def xid_sum(x, y):
    """
    XID arithmetic is tricky. See GetNewTransactionId for more information.
    Adds modulo the 32-bit XID space and skips over the reserved ids.
    """
    # Masking with 0xFFFFFFFF is equivalent to % 2**32, including for a
    # negative y (Python negatives behave as infinite two's complement).
    total = (x + y) & 0xFFFFFFFF
    if total <= FirstNormalTransactionId:
        total += FirstNormalTransactionId
    return total
def preceding_xid(x, y):
    """
    Returns the xid that precedes the other. Analogous to the standard min().
    The logic here is equivalent to the step "(int) (x - y)" where x and y are
    32-bit unsigned int variables in TransactionIdPrecedes() in transam.c.
    """
    diff = x - y
    # x wins when the signed-32-bit view of (x - y) is <= 0: that is when
    # the raw difference lies in (-2**31, 0] or wraps at >= 2**31.
    if -2**31 < diff <= 0 or diff >= 2**31:
        return x
    return y
@unittest.skipIf(gpplatform.get_info()=="SOL", "Skipping Solaris, cannot compile xidhelper.so")
class XidlimitsTests(MPPTestCase):
    """
    @description Xidlimits Tests
    @created 2013-02-12 00:00:00
    @modified 2013-02-12 00:00:00
    @tags vacuum xidlimits echo
    @gucs gp_create_table_random_default_distribution=off
    """
    # Constants identifying the limit to exceed.
    WARN_LIMIT = 0  # xid_warn_limit: server starts emitting warnings
    STOP_LIMIT = 1  # xid_stop_limit: server stops accepting commands
    WRAP_LIMIT = 2  # full 2**31 wrap-around: database ages go negative
    def setUp(self):
        # Build and load the xidhelper.so UDFs (get_next_xid, spoof_next_xid,
        # get_stop_limit, ...) used by these tests, then snapshot the cluster
        # layout into self.gparray.
        super(XidlimitsTests, self).setUp()
        Command('re-build regress.so',
                'make -C %s xidhelper.so' % local_path('.')).run(validateAfter=True)
        # Substitute the built .so path into the loader SQL before running it.
        SQLTemplateTestCase.perform_transformation_on_sqlfile(local_path('load_xidhelper.sql'),
                                                              local_path('load_xidhelper.sql.t'),
                                                              {'@source@' : local_path('xidhelper.so')})
        PSQL.run_sql_file(sql_file=local_path('load_xidhelper.sql.t'),
                          out_file=local_path('load_xidhelper.out.t'))
        self.gparray = GpArray.initFromCatalog(dbconn.DbURL(), utility=True)
def _basic_sanity_check(self, suffix, kwargs=None):
"""
Runs sanity.sql which contains a CREATE, INSERT, SELECT, and DROP.
@param kwargs: dictionary having keys are PSQL.run_sql_file argument
names and values as their values.
"""
out_file = local_path('sanity_%s.out' % suffix)
ans_file = local_path('sanity_%s.ans' % suffix)
if kwargs is None:
kwargs = {}
kwargs["sql_file"] = local_path('sanity_%s.sql' % suffix)
kwargs["out_file"] = out_file
try:
PSQL.run_sql_file(**kwargs)
except:
pass
assert Gpdiff.are_files_equal(out_file,
ans_file,
match_sub=[local_path('init_file')])
def _get_primary_mirror_pair(self):
"""
Return the (primary, mirror) having content id = 0.
"""
primary, mirror = None, None
for seg in self.gparray.getSegDbList(True):
if seg.content == 0:
if seg.role == 'p':
primary = seg
else:
mirror = seg
return (primary, mirror)
    def _set_allowconn_template0(self, flag):
        """
        Set datallowconn for template0 on master and all primary segments.

        @param flag: True to allow connections, False to disallow. The %s
            renders the Python bool as True/False, which Postgres accepts
            as boolean literals.
        """
        if flag:
            logger.info("Enabling connections to template0")
        else:
            logger.info("Disabling connections to template0")
        for seg in self.gparray.getDbList(True):
            if seg.role == 'p':
                seg_url = dbconn.DbURL(hostname=seg.hostname, port=seg.port)
                # Utility-mode connection; pg_database is a catalog table,
                # hence allowSystemTableMods for the direct UPDATE.
                with dbconn.connect(seg_url,
                                    utility=True,
                                    allowSystemTableMods='dml') as conn:
                    dbconn.execSQL(
                        conn, "update pg_database set datallowconn=%s "
                        "where datname='template0'" % flag)
                    conn.commit()
    def _restore_stop_limit_guc(self, datadir):
        """
        Reset xid_stop_limit GUC to default value, by removing the setting from
        postgresql.conf.

        @param datadir: PGDATA directory containing postgresql.conf that needs
            to be restored.
        """
        logger.info("Undo the stop limit GUC change")
        # The cluster must be down while postgresql.conf is edited.
        cmd = "source $GPHOME/greenplum_path.sh && gpstop -a"
        Command("stop system", cmd).run(validateAfter=True)
        # Strip the xid_stop_limit line appended by _reduce_stop_limit_guc;
        # sed keeps a .bk backup of the original file.
        cmd = ('sed -i".bk" "s|xid_stop_limit=.*||g" %s/postgresql.conf' %
               datadir)
        Command("undo xid_stop_limit change", cmd).run(validateAfter=True)
        cmd = "source $GPHOME/greenplum_path.sh && gpstart -a"
        Command("start system", cmd).run(validateAfter=True)
    def _reduce_stop_limit_guc(self, segdb, new_slimit):
        """
        Reduce the xid_stop_limit GUC to the specified value on one segment.

        @param segdb: GpDB instance whose postgresql.conf gets the new
            setting; the change is verified through this segment.
        @param new_slimit: New value of xid_stop_limit GUC, less than the
            default value of 10**9.
        """
        # Stop every segment individually: the whole cluster must be down
        # before the config file is edited.
        for seg in self.gparray.getDbList(True):
            logger.info("Stopping segment %d at %s" % (seg.dbid, seg.datadir))
            cmd = "pg_ctl -D %s stop" % seg.datadir
            Command("stop segment", cmd).run(validateAfter=True)
        logger.info("New xid_stop_limit: %s" % new_slimit)
        # Appended settings override earlier ones in postgresql.conf.
        cmd = ('echo "xid_stop_limit=%d" >> %s/postgresql.conf' %
               (new_slimit, segdb.datadir))
        Command("revise xid_stop_limit", cmd).run(validateAfter=True)
        logger.info("Starting the cluster")
        cmd = "source $GPHOME/greenplum_path.sh && gpstart -a"
        Command("start cluster", cmd).run(validateAfter=True)
        # Confirm the GUC took effect on the target segment.
        dburl = dbconn.DbURL(hostname=segdb.hostname, port=segdb.port)
        with dbconn.connect(dburl, utility=True) as conn:
            stop_limit = int(
                dbconn.execSQLForSingleton(conn, "SHOW xid_stop_limit"))
        self.assertEqual(stop_limit, new_slimit, "Failed to set xid_stop_limit")
def _raise_template0_age(self, limit, segdb):
"""
Increase age of template0 beyond the specified limit on the specified
segment. When a new database is created off template0, the limit will
be exceeded. Assumption: template0 age =~ 0 or at least not already
crossing any of the xid limits. Because this function can only raise
the age, cannot decrease it.
@param limit: one of WARN_LIMIT, STOP_LIMIT and WRAP_LIMIT.
@param segdb: an instance of GpDB class representing the segment on
which the limit will be exceeded.
"""
dburl = dbconn.DbURL(hostname=segdb.hostname, port=segdb.port)
databases = []
with dbconn.connect(dburl, utility=True) as conn:
sql = "SELECT datname FROM pg_database WHERE datallowconn='t'"
for row in dbconn.execSQL(conn, sql):
databases.append(row[0])
sql = "SHOW xid_stop_limit"
stop_limit_guc = int(dbconn.execSQLForSingleton(conn, sql))
sql = "SHOW xid_warn_limit"
warn_limit_guc = int(dbconn.execSQLForSingleton(conn, sql))
sql = ("SELECT datfrozenxid, age(datfrozenxid) FROM pg_database "
"WHERE datname='template0'")
row = dbconn.execSQL(conn, sql).fetchone()
datfxid, age = int(row[0]), int(row[1])
sql = "SELECT get_next_xid()"
current_xid = int(dbconn.execSQLForSingleton(conn, sql))
# Estimate of XIDs consumed by vacuum freeze operaiton on all databases.
vacuum_xids = len(databases) * 500
logger.info("Estimated xids for vacuume freeze: %d" % vacuum_xids)
if limit == self.WARN_LIMIT:
target_age = (2**31) - stop_limit_guc - warn_limit_guc
target_xid = xid_sum(datfxid, target_age)
keep_raising = lambda x: x < target_age
elif limit == self.STOP_LIMIT:
target_age = (2**31) - stop_limit_guc
target_xid = xid_sum(datfxid, target_age)
keep_raising = lambda x: x < target_age
elif limit == self.WRAP_LIMIT:
target_xid = xid_sum(datfxid, 2**31)
keep_raising = lambda x: x > 0
logger.info("Target xid = %d, limit = %d" % (target_xid, limit))
self.assertEqual(preceding_xid(target_xid, current_xid), current_xid,
"Target xid (%d) precedes current xid (%d)" %
(target_xid, current_xid))
while keep_raising(age):
with dbconn.connect(dburl, utility=True) as conn:
sql = "SELECT get_stop_limit()"
stop_limit = int(dbconn.execSQLForSingleton(conn, sql))
# GPDB may stop accepting connections if we spoof nextXid beyond
# max_xid.
max_xid = xid_sum(stop_limit, -vacuum_xids)
new_xid = preceding_xid(target_xid, max_xid)
logger.info("Spoofing next xid to %d, current stop limit = %d" %
(new_xid, stop_limit))
sql = "SELECT spoof_next_xid('%d'::xid)"
dbconn.execSQL(conn, sql % new_xid)
conn.commit()
sql = ("SELECT age(datfrozenxid) FROM pg_database "
"WHERE datname='template0'")
age = int(dbconn.execSQLForSingleton(conn, sql))
logger.info("template0 age raised to %d" % age)
# The vacuum freeze of all databases advances stop_limit further,
# necessary for iterating the while loop. And template0 becomes the
# oldest database aka the only culprit to violate the specified
# limit.
PSQL(sql_cmd='VACUUM FREEZE', dbname='postgres', out_file='vacuum_postgres.out').run(validateAfter=True)
for datname in databases:
logger.info('vacuum freeze %s' % datname)
PSQL(sql_cmd='VACUUM FREEZE',
dbname=datname,
out_file='vacuum_%s.out' % datname).run(validateAfter=True)
    def _reset_age(self, dbname, segdb=None):
        """
        Resets datfrozenxid and relfrozenxid's in pg_class of the
        specified dbname to a value close to the current xid. This is
        a recommended way of resetting age of dbname or a database
        that is created off template0.

        @param segdb: identifies the segment on which to operate. It is an
            instance of GpDB class. Defaults to the master.

        Note that the database dbname must have all tuples frozen (xmin=2).
        This holds true of template0 and of a database created off template0,
        only if there are no modifications done to the database.
        """
        if segdb is None:
            segdb = self.gparray.master
        dburl = dbconn.DbURL(hostname=segdb.hostname, port=segdb.port)
        dburl_dbname = dbconn.DbURL(hostname=segdb.hostname,
                                    port=segdb.port,
                                    dbname=dbname)
        # Step 1: point datfrozenxid at (approximately) the current xid.
        with dbconn.connect(dburl,
                            utility=True,
                            allowSystemTableMods="dml") as conn:
            sql = "SELECT get_next_xid()"
            next_xid = int(dbconn.execSQLForSingleton(conn, sql))
            sql = "UPDATE pg_database SET datfrozenxid='%d'::xid WHERE datname='%s'"
            dbconn.execSQL(conn, sql % (next_xid, dbname))
            conn.commit()
        # template0 normally has datallowconn=f; temporarily allow
        # connections so its pg_class can be fixed too.
        if dbname == "template0":
            self._set_allowconn_template0(True)
        # Step 2: same for relfrozenxid of every relation that has one
        # (int8in(xidout(...)) > 0 skips relations without a valid xid).
        with dbconn.connect(dburl_dbname,
                            utility=True,
                            allowSystemTableMods="dml") as conn:
            sql = ("UPDATE pg_class SET relfrozenxid='%d'::xid WHERE "
                   "int8in(xidout(relfrozenxid)) > 0")
            dbconn.execSQL(conn, sql % next_xid)
            conn.commit()
        PSQL(sql_cmd="VACUUM FREEZE pg_class",
             dbname=dbname,
             PGOPTIONS="-c 'gp_session_role=utility'",
             host=segdb.hostname,
             port=segdb.port,
             out_file="vacuum_%s.out" % dbname).run(validateAfter=True)
        with dbconn.connect(dburl_dbname,
                            utility=True,
                            allowSystemTableMods="dml") as conn:
            dbconn.execSQL(conn, "DELETE FROM pg_stat_last_operation")
            conn.commit()
        PSQL(sql_cmd="VACUUM FREEZE pg_stat_last_operation",
             dbname=dbname,
             PGOPTIONS="-c 'gp_session_role=utility'",
             host=segdb.hostname,
             port=segdb.port,
             out_file="vacuum_%s.out" % dbname).run(validateAfter=True)
        if dbname == "template0":
            self._set_allowconn_template0(False)
        # Verify the reset: age must now be small and positive.
        with dbconn.connect(dburl, utility=True) as conn:
            sql = "SELECT age(datfrozenxid) FROM pg_database WHERE datname='%s'"
            age_dbname = dbconn.execSQLForSingleton(conn, sql % dbname)
            age_dbname = int(age_dbname)
        logger.info("Age of %s reset to %d" % (dbname, age_dbname))
        # We are OK as long as dbname age is less than xid_warn_limit. The
        # 10000 is just a number assumed to be less than xid_warn_limit.
        self.assertTrue(age_dbname > 0 and age_dbname < 10000,
                        "age(%s) = %d, next xid = %d" %
                        (dbname, age_dbname, next_xid))
    def test_autovacuum_signaling(self):
        """
        Raise the nextXid to oldest_frozenxid + autovacuum_freeze_max_age.
        Run a transaction.
        Ensure that no autovacuum daemon is started.
        """
        dburl = dbconn.DbURL()
        with dbconn.connect(dburl) as conn:
            oldest_xid = int(dbconn.execSQLForSingleton(conn, 'select get_oldest_xid()'))
            autovacuum_freeze_max_age = int(dbconn.execSQLForSingleton(conn, 'show autovacuum_freeze_max_age'))
            # Spoof nextXid to exactly the point where upstream Postgres
            # would launch an anti-wraparound autovacuum.
            autovacuum_xid_limit = xid_sum(oldest_xid, autovacuum_freeze_max_age)
            logger.info('Raising master xid to autovacuum_xid_limit %d' % autovacuum_xid_limit)
            dbconn.execSQLForSingleton(conn, "select spoof_next_xid('%d'::xid)" % autovacuum_xid_limit)
        # A new connection to the postmaster, at this point, will ensure that we roll through
        # the ServerLoop and potentially fork an autovacuum process... if enabled.
        # Burn a transaction to trigger any undesirable behavior that we're disabling.
        with dbconn.connect(dburl) as conn:
            self.assertEqual(1, int(dbconn.execSQLForSingleton(conn, 'select 1')))
        cmd = Command('check for autovacuum',
                      'ps -ef | grep -v grep | grep postgres | grep autovacuum')
        cmd.run()
        # Empty stdout means no autovacuum worker process was found.
        self.assertEqual(cmd.get_results().stdout, "", "Seriously? Found a postgres autovacuum process!")
        self._basic_sanity_check('clean')
    def test_autovacuum_signaling_on_segment(self):
        """
        Same as above, but on a segment.
        """
        # connect to the master to build gparray
        primary, _ = self._get_primary_mirror_pair()
        logger.info('Isolated segment %d at %s:%d' % (primary.dbid, primary.hostname, primary.port))
        dburl = dbconn.DbURL(hostname=primary.hostname, port=primary.port)
        with dbconn.connect(dburl, utility=True) as conn:
            oldest_xid = int(dbconn.execSQLForSingleton(conn, 'select get_oldest_xid()'))
            autovacuum_freeze_max_age = int(dbconn.execSQLForSingleton(conn, 'show autovacuum_freeze_max_age'))
            autovacuum_xid_limit = xid_sum(oldest_xid, autovacuum_freeze_max_age)
            logger.info('Raising segment xid to autovacuum_xid_limit %d' % autovacuum_xid_limit)
            dbconn.execSQLForSingleton(conn, "select spoof_next_xid('%d'::xid)" % autovacuum_xid_limit)
        # A new connection to the postmaster, at this point, will ensure that we roll through
        # the ServerLoop and potentially fork an autovacuum process... if enabled.
        with dbconn.connect(dburl, utility=True) as conn:
            self.assertEqual(1, int(dbconn.execSQLForSingleton(conn, 'select 1')))
        # NOTE(review): only 'ps -ef' runs over ssh; the grep pipeline
        # filters its output locally. Equivalent result, but confirm this
        # is intended if the command is ever changed.
        cmd = Command('check for autovacuum',
                      'ssh %s ps -ef | grep -v grep | grep postgres | grep autovacuum' % primary.hostname)
        cmd.run()
        self.assertEqual(cmd.get_results().stdout, "", "Seriously? Found a postgres autovacuum process!")
        self._basic_sanity_check('clean')
    def test_template0_age_limits_master(self):
        """
        Increase template0 age on master in steps:
        1. Cross warn limit on segment
        2. Cross stop limit on segment
        3. Cause wrap around on segment
        """
        # Order matters: _raise_template0_age can only raise the age, so the
        # steps must go from the lowest limit to the highest.
        self.template0_warn_limit()
        self.template0_stop_limit()
        self.template0_wrap_around()
        # Clean up.
        self._reset_age("template0")
    def template0_warn_limit(self):
        """
        Raise next xid so that age(template0) grows beyond warn limit.
        Create a new database from template0, which will inherit age
        of template0. Ensure that warnings stop when vacuum freeze is
        run on the new database.
        """
        # Bump up age of template0 to cause warn limit violation.
        self._raise_template0_age(self.WARN_LIMIT, self.gparray.master)
        # All is well until we create a new db off template0.
        self._basic_sanity_check("clean")
        # Create database newdb off template0.
        PSQL(sql_cmd="CREATE DATABASE newdb TEMPLATE template0").run(
            validateAfter=True)
        # newdb is now the oldest database, older than warn limit.
        self._basic_sanity_check("warn")
        # Ensure that vacuum freeze on newdb stops the warnings.
        PSQL(sql_cmd="VACUUM FREEZE", dbname="newdb",
             out_file="vacuum_newdb_wl_master.out").run(validateAfter=True)
        self._basic_sanity_check("clean")
        PSQL.drop_database(dbname="newdb")
    def template0_stop_limit(self):
        """
        Raise next xid so that age(template0) grows beyond stop limit.
        Create a new database off template0, let GPDB stop accepting
        commands. Recover GPDB using the documented procedure.
        Ensure that the new database is sane.
        """
        dburl = dbconn.DbURL()
        with dbconn.connect(dburl, utility=True) as conn:
            sql = "SHOW xid_stop_limit"
            slimit_guc = int(dbconn.execSQLForSingleton(conn, sql))
        # Recovery will lower the stop limit by 10**6.
        new_limit = xid_sum(slimit_guc, -(10**6))
        # Raise nextXid so that template0 age would cross stop limit.
        self._raise_template0_age(self.STOP_LIMIT, self.gparray.master)
        # newdb's age crosses stop limit and GPDB stops accepting commands.
        PSQL(sql_cmd="CREATE DATABASE newdb TEMPLATE template0").run(
            validateAfter=True)
        self._basic_sanity_check("error")
        # Reduce xid_stop_limit as per the standard procedure.
        self._reduce_stop_limit_guc(self.gparray.master, new_limit)
        # Vacuum freezing newdb should suffice to recover.
        PSQL(sql_cmd="VACUUM FREEZE",
             dbname="newdb",
             out_file="vacuum_newdb_stop_master.out").run(validateAfter=True)
        self._basic_sanity_check("clean")
        PSQL.drop_database(dbname="newdb")
        self._restore_stop_limit_guc(self.gparray.master.datadir)
    def template0_wrap_around(self):
        """
        Raise next xid so that age(template0) suffers a wrap around and
        becomes negative. Create a new database off template0, which
        also suffers wrap around. Reset the new db's age. Sanity
        must succeed on the new db.
        """
        self._raise_template0_age(self.WRAP_LIMIT, self.gparray.master)
        PSQL(sql_cmd="CREATE DATABASE newdb TEMPLATE template0").run(
            validateAfter=True)
        sql = "SELECT age(datfrozenxid) FROM pg_database WHERE datname='newdb'"
        dburl = dbconn.DbURL()
        with dbconn.connect(dburl, utility=True) as conn:
            age_newdb = int(dbconn.execSQLForSingleton(conn, sql))
        # Xid wrap-around should cause template0 and newdb's age to be negative.
        self.assertTrue(age_newdb < 0)
        # All xids in newdb are frozen at this point. Therefore, we
        # can reset its age so that it is not negative.
        self._reset_age("newdb")
        with dbconn.connect(dburl, utility=True) as conn:
            age_newdb = int(dbconn.execSQLForSingleton(conn, sql))
        self.assertTrue(age_newdb > 0)
        # Verify that normal operations can be performed on newdb post recovery
        # from wraparound.
        self._basic_sanity_check("clean", {"dbname":"newdb"})
        logger.info("Sanity succeeded on newdb, dropping it.")
        PSQL.drop_database(dbname="newdb")
    def test_template0_age_limits_segment(self):
        """
        Increase template0 age on a segment in steps:
        1. Cross warn limit on segment
        2. Cross stop limit on segment
        3. Cause wrap around on segment
        """
        primary, _ = self._get_primary_mirror_pair()
        self.template0_warn_limit_on_segment(primary)
        # The remaining steps are currently disabled; the commented calls
        # are kept for future re-enablement.
        # self.template0_stop_limit_on_segment(primary)
        # self.template0_wrap_around_on_segment(primary)
        # Clean up.
        # self._reset_age("template0", primary)
    def template0_warn_limit_on_segment(self, primary):
        """
        Same as template0_warn_limit, but on a segment.

        @param primary: GpDB instance of the primary segment to target.
        """
        logger.info("template0_warn_limit_on_segment: dbid(%d) %s:%d'" %
                    (primary.dbid, primary.hostname, primary.port))
        # Bump up age of template0 to cause warn limit violation.
        self._raise_template0_age(self.WARN_LIMIT, primary)
        # All is well until we create a new db off template0.
        self._basic_sanity_check("clean")
        # Create database newdb off template0.
        PSQL(sql_cmd="CREATE DATABASE newdb TEMPLATE template0").run(
            validateAfter=True)
        logger.info("newdb created off template0")
        # newdb is now the oldest database, older than warn limit.
        self._basic_sanity_check("warn_segment")
        # Ensure that vacuum freeze on newdb stops the warnings.
        PSQL(sql_cmd="VACUUM FREEZE", dbname="newdb",
             out_file="vacuum_newdb_warn_seg.out").run(validateAfter=True)
        self._basic_sanity_check("clean")
        PSQL.drop_database(dbname="newdb")
    def template0_stop_limit_on_segment(self, primary):
        """
        Same as template0_stop_limit, but on segment.

        @param primary: GpDB instance of the primary segment to target.
        """
        logger.info("template0_stop_limit_on_segment: dbid(%d) %s:%d'" %
                    (primary.dbid, primary.hostname, primary.port))
        dburl = dbconn.DbURL(hostname=primary.hostname, port=primary.port)
        with dbconn.connect(dburl, utility=True) as conn:
            sql = "SHOW xid_stop_limit"
            slimit_guc = int(dbconn.execSQLForSingleton(conn, sql))
        # Recovery will lower the stop limit by 10**6.
        new_limit = xid_sum(slimit_guc, -(10**6))
        # Raise nextXid so that template0 age would cross stop limit.
        self._raise_template0_age(self.STOP_LIMIT, primary)
        # newdb's age crosses stop limit and GPDB stops accepting commands.
        PSQL(sql_cmd="CREATE DATABASE newdb TEMPLATE template0").run(
            validateAfter=True)
        logger.info("newdb created off template0")
        # Ensure that utility connections to the segment fail with error.
        psql_args = {"PGOPTIONS":"-c 'gp_session_role=utility'",
                     "host":primary.hostname,
                     "port":primary.port}
        self._basic_sanity_check("error", psql_args)
        logger.info("Utility connection to dbid(%d) reported stop limit "
                    "error, as expected." % primary.dbid)
        try:
            # Verify that SQL commands from master fail.
            PSQL(sql_cmd="CREATE TABLE test (a int, b int)").run(
                validateAfter=True)
            self.fail("CREATE TABLE succeeded from master, when expecting "
                      "stop limit error on segment.")
        except ExecutionError:
            logger.info("CREATE TABLE failed from master, as expected.")
        # Reduce xid_stop_limit as per the standard procedure.
        self._reduce_stop_limit_guc(primary, new_limit)
        # Vacuum freezing newdb should suffice to recover.
        PSQL(sql_cmd="VACUUM FREEZE", dbname="newdb",
             out_file="vacuum_newdb_wl.out").run(validateAfter=True)
        # Ensure that utility connections to the segment are successful.
        sql = "SELECT age(datfrozenxid) FROM pg_database WHERE datname='newdb'"
        with dbconn.connect(dburl, utility=True) as conn:
            age_newdb = int(dbconn.execSQLForSingleton(conn, sql))
        self.assertTrue(age_newdb > 0)
        # Verify SQL commands from master are successful.
        self._basic_sanity_check("clean")
        self._restore_stop_limit_guc(primary.datadir)
        # Verify SQL commands after restoring xid_stop_limit GUC.
        self._basic_sanity_check("clean")
        PSQL.drop_database(dbname="newdb")
    def template0_wrap_around_on_segment(self, primary):
        """
        Same as template0_wrap_around, but on segment.

        @param primary: GpDB instance of the primary segment to target.
        """
        logger.info("template0_wrap_around_on_segment: dbid(%d) %s:%d'" %
                    (primary.dbid, primary.hostname, primary.port))
        self._raise_template0_age(self.WRAP_LIMIT, primary)
        PSQL(sql_cmd="CREATE DATABASE newdb TEMPLATE template0").run(
            validateAfter=True)
        sql = "SELECT age(datfrozenxid) FROM pg_database WHERE datname='newdb'"
        # Verify that age of newdb on the segment is negative.
        dburl = dbconn.DbURL(hostname=primary.hostname, port=primary.port)
        with dbconn.connect(dburl, utility=True) as conn:
            age_newdb = int(dbconn.execSQLForSingleton(conn, sql))
        self.assertTrue(age_newdb < 0)
        # Reset newdb age so as to recover from wrap around.
        self._reset_age("newdb", primary)
        # Verify that normal operations can be performed on newdb whose age was
        # reset to a correct value.
        self._basic_sanity_check("clean", {"dbname":"newdb"})
        # Verify that age of newdb on the segment is valid.
        with dbconn.connect(dburl, utility=True) as conn:
            age_newdb = int(dbconn.execSQLForSingleton(conn, sql))
        self.assertTrue(age_newdb > 0)
        PSQL.drop_database(dbname="newdb")
| |
"""
Admin commands
"""
import time, re
from django.conf import settings
from django.contrib.auth.models import User
from src.server.sessionhandler import SESSIONS
from src.server.models import ServerConfig
from src.utils import utils, prettytable, search
from src.commands.default.muxcommand import MuxCommand
# Permission strings are compared case-insensitively throughout.
PERMISSION_HIERARCHY = [p.lower() for p in settings.PERMISSION_HIERARCHY]
# limit members for API inclusion
__all__ = ("CmdBoot", "CmdBan", "CmdUnban", "CmdDelPlayer", "CmdEmit", "CmdNewPassword",
           "CmdPerm", "CmdWall")
class CmdBoot(MuxCommand):
    """
    @boot
    Usage
      @boot[/switches] <player obj> [: reason]
    Switches:
      quiet - Silently boot without informing player
      port - boot by port number instead of name or dbref
    Boot a player object from the server. If a reason is
    supplied it will be echoed to the user unless /quiet is set.
    """
    key = "@boot"
    locks = "cmd:perm(boot) or perm(Wizards)"
    help_category = "Admin"

    def func(self):
        "Implementing the function"
        caller = self.caller
        args = self.args
        if not args:
            caller.msg("Usage: @boot[/switches] <player> [:reason]")
            return
        # Split off an optional ": reason" suffix.
        if ':' in args:
            args, reason = [a.strip() for a in args.split(':', 1)]
        else:
            args, reason = args, ""
        boot_list = []
        if 'port' in self.switches:
            # Boot a particular port.
            sessions = SESSIONS.get_session_list(True)
            for sess in sessions:
                # Find the session with the matching port number.
                if sess.getClientAddress()[1] == int(args):
                    boot_list.append(sess)
                    break
            # NOTE(review): in this branch pobj is never bound, so the
            # disconnect loop at the bottom raises NameError; it needs the
            # booted session's player. Confirm against the SESSIONS API.
        else:
            # Boot by player object
            pobj = search.player_search(args)
            if not pobj:
                # BUGFIX: this was self.caller(...) with pobj.key -- caller
                # is not callable and pobj is empty/falsy here. Report the
                # search term to the caller instead.
                caller.msg("Player %s was not found." % args)
                return
            pobj = pobj[0]
            if not pobj.access(caller, 'boot'):
                # NOTE(review): this message goes to the target with %s left
                # unfilled; probably meant caller.msg(string % pobj.key).
                # Behavior kept as-is pending confirmation.
                string = "You don't have the permission to boot %s."
                pobj.msg(string)
                return
            # we have a bootable object with a connected user
            matches = SESSIONS.sessions_from_player(pobj)
            for match in matches:
                boot_list.append(match)
        if not boot_list:
            caller.msg("No matching sessions found. The Player does not seem to be online.")
            return
        # Carry out the booting of the sessions in the boot list.
        feedback = None
        if 'quiet' not in self.switches:
            feedback = "You have been disconnected by %s.\n" % caller.name
            if reason:
                feedback += "\nReason given: %s" % reason
        for session in boot_list:
            session.msg(feedback)
            pobj.disconnect_session_from_player(session.sessid)
# regex matching IP addresses with wildcards, eg. 233.122.4.*
# (each "octet" is 1-3 chars drawn from digits or '*'; the pattern is not
# anchored, so it matches anywhere inside the argument string)
IPREGEX = re.compile(r"[0-9*]{1,3}\.[0-9*]{1,3}\.[0-9*]{1,3}\.[0-9*]{1,3}")
def list_bans(banlist):
    """
    Helper function to display a list of active bans. Input argument
    is the banlist read into the two commands @ban and @unban below.

    Each entry is a (name, ip, ipregex, date, reason) tuple; the name/ip
    column shows the name when set, otherwise the ip. Returns the string
    to display (ids are 1-based to match @unban input).
    """
    if not banlist:
        return "No active bans were found."
    table = prettytable.PrettyTable(["{wid", "{wname/ip", "{wdate", "{wreason"])
    for inum, ban in enumerate(banlist):
        # 'ban[0] and ban[0] or ban[1]' simplified to 'ban[0] or ban[1]'.
        table.add_row([str(inum + 1),
                       ban[0] or ban[1],
                       ban[3], ban[4]])
    string = "{wActive bans:{n\n%s" % table
    return string
class CmdBan(MuxCommand):
    """
    ban a player from the server
    Usage:
      @ban [<name or ip> [: reason]]
    Without any arguments, shows numbered list of active bans.
    This command bans a user from accessing the game. Supply an
    optional reason to be able to later remember why the ban was put in
    place.
    It is often preferable
    to deleting a player with @delplayer. If banned by name,
    that player account can no longer be logged into.
    IP (Internet Protocol) address banning allows to block all access
    from a specific address or subnet. Use the asterisk (*) as a
    wildcard.
    Examples:
      @ban thomas - ban account 'thomas'
      @ban/ip 134.233.2.111 - ban specific ip address
      @ban/ip 134.233.2.* - ban all in a subnet
      @ban/ip 134.233.*.* - even wider ban
    A single IP filter is easy to circumvent by changing the computer
    (also, some ISPs assign only temporary IPs to their users in the
    first place). Widening the IP block filter with wildcards might be
    tempting, but remember that blocking too much may accidentally
    also block innocent users connecting from the same country and
    region.
    """
    key = "@ban"
    aliases = ["@bans"]
    locks = "cmd:perm(ban) or perm(Immortals)"
    help_category = "Admin"
    def func(self):
        """
        Bans are stored in a serverconf db object as a list of
        dictionaries:
          [ (name, ip, ipregex, date, reason),
            (name, ip, ipregex, date, reason),... ]
        where name and ip are set by the user and are shown in
        lists. ipregex is a converted form of ip where the * is
        replaced by an appropriate regex pattern for fast
        matching. date is the time stamp the ban was instigated and
        'reason' is any optional info given to the command. Unset
        values in each tuple is set to the empty string.
        """
        banlist = ServerConfig.objects.conf('server_bans')
        if not banlist:
            banlist = []
        # No argument, or a switch other than ip/name: just show the list.
        if not self.args or (self.switches
                             and not any(switch in ('ip', 'name') for switch in self.switches)):
            self.caller.msg(list_bans(banlist))
            return
        now = time.ctime()
        reason = ""
        if ':' in self.args:
            ban, reason = self.args.rsplit(':', 1)
        else:
            ban = self.args
        ban = ban.lower()
        ipban = IPREGEX.findall(ban)
        if not ipban:
            # store as a name ban
            typ = "Name"
            bantup = (ban, "", "", now, reason)
        else:
            # an ip address: convert the wildcard form to a compiled regex.
            typ = "IP"
            ban = ipban[0]
            # escape the dots (raw string: the original '\.' relied on an
            # invalid-escape passthrough) and turn * into an octet pattern.
            ipregex = ban.replace('.', r'\.')
            ipregex = ipregex.replace('*', '[0-9]{1,3}')
            ipregex = re.compile(ipregex)
            bantup = ("", ban, ipregex, now, reason)
        # save updated banlist
        banlist.append(bantup)
        ServerConfig.objects.conf('server_bans', banlist)
        self.caller.msg("%s-Ban {w%s{x was added." % (typ, ban))
class CmdUnban(MuxCommand):
    """
    remove a ban
    Usage:
      @unban <banid>
    This will clear a player name/ip ban previously set with the @ban
    command. Use this command without an argument to view a numbered
    list of bans. Use the numbers in this list to select which one to
    unban.
    """
    key = "@unban"
    locks = "cmd:perm(unban) or perm(Immortals)"
    help_category = "Admin"
    def func(self):
        "Implement unbanning"
        banlist = ServerConfig.objects.conf('server_bans')
        if not self.args:
            self.caller.msg(list_bans(banlist))
            return
        try:
            num = int(self.args)
        except ValueError:
            # int() on a non-numeric string is the only expected failure;
            # don't swallow unrelated errors with a broad except.
            self.caller.msg("You must supply a valid ban id to clear.")
            return
        if not banlist:
            self.caller.msg("There are no bans to clear.")
        elif not 0 < num <= len(banlist):
            # ids shown to the user are 1-based
            self.caller.msg("Ban id {w%s{x was not found." % self.args)
        else:
            # all is ok, clear ban
            ban = banlist[num - 1]
            del banlist[num - 1]
            ServerConfig.objects.conf('server_bans', banlist)
            self.caller.msg("Cleared ban %s: %s" % (num, " ".join(ban[:2])))
class CmdDelPlayer(MuxCommand):
    """
    delplayer - delete player from server
    Usage:
      @delplayer[/switch] <name> [: reason]
    Switch:
      delobj - also delete the player's currently
               assigned in-game object.
    Completely deletes a user from the server database,
    making their nick and e-mail again available.
    """
    key = "@delplayer"
    locks = "cmd:perm(delplayer) or perm(Immortals)"
    help_category = "Admin"
    def func(self):
        "Implements the command."
        caller = self.caller
        args = self.args
        # Operate on the controlling player, not an in-game puppet.
        if hasattr(caller, 'player'):
            caller = caller.player
        if not args:
            self.msg("Usage: @delplayer[/delobj] <player/user name or #id> [: reason]")
            return
        reason = ""
        if ':' in args:
            args, reason = [arg.strip() for arg in args.split(':', 1)]
        # We use player_search since we want to be sure to find also players
        # that lack characters.
        players = caller.search_player(args, quiet=True)
        if not players:
            # try to find a user instead of a Player
            try:
                user = User.objects.get(id=args)
            except Exception:
                try:
                    user = User.objects.get(username__iexact=args)
                except Exception:
                    string = "No Player nor User found matching '%s'." % args
                    self.msg(string)
                    return
            try:
                player = user.get_profile()
            except Exception:
                player = None
            if player and not player.access(caller, 'delete'):
                string = "You don't have the permissions to delete this player."
                self.msg(string)
                return
            string = ""
            name = user.username
            user.delete()
            if player:
                name = player.name
                player.delete()
                string = "Player %s was deleted." % name
            else:
                string += "The User %s was deleted. It had no Player associated with it." % name
            self.msg(string)
            return
        elif utils.is_iter(players):
            string = "There were multiple matches:"
            for player in players:
                string += "\n %s %s" % (player.id, player.key)
            # BUG FIX: the match summary was built but never sent before
            # returning, leaving the caller with no feedback at all.
            self.msg(string)
            return
        else:
            # one single match
            player = players
            user = player.user
            if not player.access(caller, 'delete'):
                string = "You don't have the permissions to delete that player."
                self.msg(string)
                return
            uname = user.username
            # boot the player then delete
            self.msg("Informing and disconnecting player ...")
            string = "\nYour account '%s' is being *permanently* deleted.\n" % uname
            if reason:
                string += " Reason given:\n '%s'" % reason
            player.unpuppet_all()
            for session in SESSIONS.sessions_from_player(player):
                player.msg(string, sessid=session.sessid)
                player.disconnect_session_from_player(session.sessid)
            user.delete()
            player.delete()
            self.msg("Player %s was successfully deleted." % uname)
class CmdEmit(MuxCommand):
    """
    @emit
    Usage:
      @emit[/switches] [<obj>, <obj>, ... =] <message>
      @remit [<obj>, <obj>, ... =] <message>
      @pemit [<obj>, <obj>, ... =] <message>
    Switches:
      room : limit emits to rooms only (default)
      players : limit emits to players only
      contents : send to the contents of matched objects too
    Emits a message to the selected objects or to
    your immediate surroundings. If the object is a room,
    send to its contents. @remit and @pemit are just
    limited forms of @emit, for sending to rooms and
    to players respectively.
    """
    key = "@emit"
    aliases = ["@pemit", "@remit"]
    locks = "cmd:perm(emit) or perm(Builders)"
    help_category = "Admin"
    def func(self):
        "Implement the command"
        caller = self.caller
        args = self.args
        if not args:
            string = "Usage: "
            string += "\n@emit[/switches] [<obj>, <obj>, ... =] <message>"
            string += "\n@remit [<obj>, <obj>, ... =] <message>"
            string += "\n@pemit [<obj>, <obj>, ... =] <message>"
            caller.msg(string)
            return
        rooms_only = 'rooms' in self.switches
        players_only = 'players' in self.switches
        send_to_contents = 'contents' in self.switches
        # we check which command was used to force the switches
        if self.cmdstring == '@remit':
            rooms_only = True
        elif self.cmdstring == '@pemit':
            players_only = True
        if not self.rhs:
            # no '=': emit the whole argument to the caller's location
            message = self.args
            objnames = [caller.location.key]
        else:
            message = self.rhs
            objnames = self.lhslist
        # send to all objects
        for objname in objnames:
            obj = caller.search(objname, global_search=True)
            if not obj:
                return
            # rooms have no location; use an identity test rather than
            # the original `not obj.location == None`
            if rooms_only and obj.location is not None:
                caller.msg("%s is not a room. Ignored." % objname)
                continue
            if players_only and not obj.has_player:
                caller.msg("%s has no active player. Ignored." % objname)
                continue
            if obj.access(caller, 'tell'):
                obj.msg(message)
                if send_to_contents:
                    for content in obj.contents:
                        content.msg(message)
                    caller.msg("Emitted to %s and its contents." % objname)
                else:
                    caller.msg("Emitted to %s." % objname)
            else:
                caller.msg("You are not allowed to emit to %s." % objname)
class CmdNewPassword(MuxCommand):
    """
    @userpassword
    Usage:
      @userpassword <user obj> = <new password>
    Set a player's password.
    """
    key = "@userpassword"
    locks = "cmd:perm(newpassword) or perm(Wizards)"
    help_category = "Admin"
    def func(self):
        "Implement the function."
        if not self.rhs:
            self.msg("Usage: @userpassword <user obj> = <new password>")
            return
        # search_player also resolves aliases such as 'me'.
        target = self.caller.search_player(self.lhs)
        if not target:
            return
        new_password = self.rhs
        target.user.set_password(new_password)
        target.user.save()
        self.msg("%s - new password set to '%s'." % (target.name, new_password))
        # Notify the player unless they changed their own password.
        if target.character != self.caller:
            target.msg("%s has changed your password to '%s'." % (self.caller.name, new_password))
class CmdPerm(MuxCommand):
    """
    @perm - set permissions
    Usage:
      @perm[/switch] <object> [= <permission>[,<permission>,...]]
      @perm[/switch] *<player> [= <permission>[,<permission>,...]]
    Switches:
      del : delete the given permission from <object> or <player>.
      player : set permission on a player (same as adding * to name)
    This command sets/clears individual permission strings on an object
    or player. If no permission is given, list all permissions on <object>.
    """
    key = "@perm"
    aliases = "@setperm"
    locks = "cmd:perm(perm) or perm(Immortals)"
    help_category = "Admin"
    def func(self):
        "Implement function"
        caller = self.caller
        switches = self.switches
        lhs, rhs = self.lhs, self.rhs
        if not self.args:
            string = "Usage: @perm[/switch] object [ = permission, permission, ...]"
            caller.msg(string)
            return
        # '*name' or the /player switch targets a player, not an object.
        playermode = 'player' in self.switches or lhs.startswith('*')
        if playermode:
            obj = caller.search_player(lhs)
        else:
            obj = caller.search(lhs, global_search=True)
        if not obj:
            return
        if not rhs:
            # No '=': just list permissions on the target.
            if not obj.access(caller, 'examine'):
                caller.msg("You are not allowed to examine this object.")
                return
            string = "Permissions on {w%s{n: " % obj.key
            if not obj.permissions:
                string += "<None>"
            else:
                string += ", ".join(obj.permissions)
            if hasattr(obj, 'player') and hasattr(obj.player, 'is_superuser') and obj.player.is_superuser:
                string += "\n(... but this object is currently controlled by a SUPERUSER! "
                string += "All access checks are passed automatically.)"
            caller.msg(string)
            return
        # we supplied an argument on the form obj = perm
        if not obj.access(caller, 'control'):
            caller.msg("You are not allowed to edit this object's permissions.")
            return
        cstring = ""
        tstring = ""
        if 'del' in switches:
            # delete the given permission(s) from object.
            for perm in self.rhslist:
                try:
                    index = obj.permissions.index(perm)
                except ValueError:
                    cstring += "\nPermission '%s' was not defined on %s." % (perm, obj.name)
                    continue
                permissions = obj.permissions
                del permissions[index]
                obj.permissions = permissions
                cstring += "\nPermission '%s' was removed from %s." % (perm, obj.name)
                tstring += "\n%s revokes the permission '%s' from you." % (caller.name, perm)
        else:
            # add the new permission(s)
            permissions = obj.permissions
            for perm in self.rhslist:
                # don't allow to set a permission higher in the hierarchy than the one the
                # caller has (to prevent self-escalation)
                if perm.lower() in PERMISSION_HIERARCHY and not obj.locks.check_lockstring(caller, "dummy:perm(%s)" % perm):
                    caller.msg("You cannot assign a permission higher than the one you have yourself.")
                    return
                # BUG FIX: the messages below interpolated `rhs` (the whole
                # right-hand side) instead of the individual `perm`, which
                # produced wrong output when several permissions were given.
                if perm in permissions:
                    cstring += "\nPermission '%s' is already defined on %s." % (perm, obj.name)
                else:
                    permissions.append(perm)
                    obj.permissions = permissions
                    cstring += "\nPermission '%s' given to %s." % (perm, obj.name)
                    tstring += "\n%s gives you the permission '%s'." % (caller.name, perm)
        caller.msg(cstring.strip())
        if tstring:
            obj.msg(tstring.strip())
class CmdWall(MuxCommand):
    """
    @wall
    Usage:
      @wall <message>
    Announces a message to all connected players.
    """
    key = "@wall"
    locks = "cmd:perm(wall) or perm(Wizards)"
    help_category = "Admin"
    def func(self):
        "Implements command"
        if not self.args:
            self.caller.msg("Usage: @wall <message>")
            return
        # Wrap the text as a shout attributed to the caller.
        announcement = "%s shouts \"%s\"" % (self.caller.name, self.args)
        self.msg("Announcing to all connected players ...")
        SESSIONS.announce_all(announcement)
| |
#!/usr/bin/env python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to download all datasets and create .tfrecord files.
"""
import collections
import gzip
import os
import sys
import tarfile
import tempfile
from urllib import request
from easydict import EasyDict
from libml.data import DATA_DIR
import numpy as np
import scipy.io
import tensorflow as tf
from tqdm import trange
# Download locations for the supported datasets.  The 'svhn' entry
# contains a '{}' placeholder that is filled with the split name
# (train/test/extra) in _load_svhn.
URLS = {
    'svhn': 'http://ufldl.stanford.edu/housenumbers/{}_32x32.mat',
    'cifar10': 'https://www.cs.toronto.edu/~kriz/cifar-10-matlab.tar.gz',
    'cifar100': 'https://www.cs.toronto.edu/~kriz/cifar-100-matlab.tar.gz',
    'stl10': 'http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz',
}
def _encode_png(images):
    """Encode a batch of uint8 images into a list of PNG byte strings."""
    encoded = []
    with tf.Session() as sess, tf.device('cpu:0'):
        image_ph = tf.placeholder(tf.uint8, [None, None, None], 'image_x')
        encode_op = tf.image.encode_png(image_ph)
        for idx in trange(images.shape[0], desc='PNG Encoding', leave=False):
            encoded.append(sess.run(encode_op, feed_dict={image_ph: images[idx]}))
    return encoded
def _load_svhn():
    """Download SVHN and return {'train'/'test'/'extra': {'images', 'labels'}}."""
    splits = collections.OrderedDict()
    for split in ['train', 'test', 'extra']:
        with tempfile.NamedTemporaryFile() as f:
            request.urlretrieve(URLS['svhn'].format(split), f.name)
            data_dict = scipy.io.loadmat(f.name)
        images = np.transpose(data_dict['X'], [3, 0, 1, 2])
        labels = data_dict['y'].reshape((-1))
        # SVHN raw data uses labels from 1 to 10; use 0 to 9 instead.
        labels -= 1
        splits[split] = {'images': _encode_png(images), 'labels': labels}
    return splits
def _load_stl10():
    """Load STL10 (from a local tarball if present, otherwise download it)
    and return train/test/unlabeled splits plus the fold-index file."""
    def unflatten(images):
        # Raw STL10 stores column-major (N, 3, 96, 96) data.
        return np.transpose(images.reshape((-1, 3, 96, 96)),
                            [0, 3, 2, 1])
    with tempfile.NamedTemporaryFile() as tmp:
        if os.path.exists('stl10/stl10_binary.tar.gz'):
            # Reuse a previously downloaded archive.
            f = open('stl10/stl10_binary.tar.gz', 'rb')
        else:
            request.urlretrieve(URLS['stl10'], tmp.name)
            f = tmp
        try:
            tar = tarfile.open(fileobj=f)
            train_X = tar.extractfile('stl10_binary/train_X.bin')
            train_y = tar.extractfile('stl10_binary/train_y.bin')
            test_X = tar.extractfile('stl10_binary/test_X.bin')
            test_y = tar.extractfile('stl10_binary/test_y.bin')
            unlabeled_X = tar.extractfile('stl10_binary/unlabeled_X.bin')
            # STL10 labels are 1-based; shift them to 0-based.
            train_set = {'images': np.frombuffer(train_X.read(), dtype=np.uint8),
                         'labels': np.frombuffer(train_y.read(), dtype=np.uint8) - 1}
            test_set = {'images': np.frombuffer(test_X.read(), dtype=np.uint8),
                        'labels': np.frombuffer(test_y.read(), dtype=np.uint8) - 1}
            _imgs = np.frombuffer(unlabeled_X.read(), dtype=np.uint8)
            unlabeled_set = {'images': _imgs,
                             'labels': np.zeros(100000, dtype=np.uint8)}
            fold_indices = tar.extractfile('stl10_binary/fold_indices.txt').read()
        finally:
            # BUG FIX: the handle to the local tarball was never closed.
            if f is not tmp:
                f.close()
    train_set['images'] = _encode_png(unflatten(train_set['images']))
    test_set['images'] = _encode_png(unflatten(test_set['images']))
    unlabeled_set['images'] = _encode_png(unflatten(unlabeled_set['images']))
    return dict(train=train_set, test=test_set, unlabeled=unlabeled_set,
                files=[EasyDict(filename="stl10_fold_indices.txt", data=fold_indices)])
def _load_cifar10():
    """Download CIFAR-10 and return its train/test splits."""
    def unflatten(images):
        return np.transpose(images.reshape((images.shape[0], 3, 32, 32)),
                            [0, 2, 3, 1])
    with tempfile.NamedTemporaryFile() as f:
        request.urlretrieve(URLS['cifar10'], f.name)
        tar = tarfile.open(fileobj=f)
        # The training data is stored as five separate .mat batches.
        batches = [scipy.io.loadmat(tar.extractfile(
            'cifar-10-batches-mat/data_batch_{}.mat'.format(batch)))
            for batch in range(1, 6)]
        train_set = {'images': np.concatenate([b['data'] for b in batches], axis=0),
                     'labels': np.concatenate([b['labels'].flatten() for b in batches], axis=0)}
        test_dict = scipy.io.loadmat(tar.extractfile(
            'cifar-10-batches-mat/test_batch.mat'))
        test_set = {'images': test_dict['data'],
                    'labels': test_dict['labels'].flatten()}
        train_set['images'] = _encode_png(unflatten(train_set['images']))
        test_set['images'] = _encode_png(unflatten(test_set['images']))
        return dict(train=train_set, test=test_set)
def _load_cifar100():
    """Download CIFAR-100 and return its train/test splits (fine labels)."""
    def unflatten(images):
        return np.transpose(images.reshape((images.shape[0], 3, 32, 32)),
                            [0, 2, 3, 1])
    def to_split(mat):
        # Only the fine-grained (100-class) labels are kept.
        return {'images': mat['data'], 'labels': mat['fine_labels'].flatten()}
    with tempfile.NamedTemporaryFile() as f:
        request.urlretrieve(URLS['cifar100'], f.name)
        tar = tarfile.open(fileobj=f)
        train_set = to_split(scipy.io.loadmat(tar.extractfile('cifar-100-matlab/train.mat')))
        test_set = to_split(scipy.io.loadmat(tar.extractfile('cifar-100-matlab/test.mat')))
        train_set['images'] = _encode_png(unflatten(train_set['images']))
        test_set['images'] = _encode_png(unflatten(test_set['images']))
        return dict(train=train_set, test=test_set)
def _int64_feature(value):
    """Wrap a single integer as a tf.train.Feature holding an Int64List."""
    int64_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int64_list)
def _bytes_feature(value):
    """Wrap a single bytes value as a tf.train.Feature holding a BytesList."""
    bytes_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=bytes_list)
def _save_as_tfrecord(data, filename):
    """Write a {'images', 'labels'} split to DATA_DIR/<filename>.tfrecord."""
    assert len(data['images']) == len(data['labels'])
    filename = os.path.join(DATA_DIR, filename + '.tfrecord')
    print('Saving dataset:', filename)
    with tf.python_io.TFRecordWriter(filename) as writer:
        for idx in trange(len(data['images']), desc='Building records'):
            features = tf.train.Features(feature=dict(
                image=_bytes_feature(data['images'][idx]),
                label=_int64_feature(data['labels'][idx])))
            example = tf.train.Example(features=features)
            writer.write(example.SerializeToString())
    print('Saved:', filename)
def _is_installed(name, checksums):
    """Return True if every tfrecord split for `name` already exists.

    Only the subset names (keys) of `checksums` are consulted; the
    checksum values themselves are not verified here.
    """
    # The original iterated .items() and ignored the values; iterate the
    # keys and collapse the manual loop into all().
    return all(
        os.path.exists(os.path.join(DATA_DIR, '%s-%s.tfrecord' % (name, subset)))
        for subset in checksums)
def _save_files(files, *args, **kwargs):
    """Save a {relative_path: text_contents} mapping under DATA_DIR."""
    del args, kwargs
    # Create every target directory first (deduplicated).
    directories = frozenset(os.path.dirname(name) for name in files)
    for directory in directories:
        os.makedirs(os.path.join(DATA_DIR, directory), exist_ok=True)
    for name, contents in files.items():
        with open(os.path.join(DATA_DIR, name), 'w') as fp:
            fp.write(contents)
def _is_installed_folder(name, folder):
    """Return True if DATA_DIR/<name>/<folder> already exists."""
    target = os.path.join(DATA_DIR, name, folder)
    return os.path.exists(target)
# Per-dataset configuration: 'loader' builds the splits in memory and
# 'checksums' names the expected tfrecord subsets (the checksum values
# are unused placeholders; only the keys matter to _is_installed).
CONFIGS = dict(
    cifar10=dict(loader=_load_cifar10,
                 checksums=dict(train=None, test=None)),
    cifar100=dict(loader=_load_cifar100,
                  checksums=dict(train=None, test=None)),
    svhn=dict(loader=_load_svhn,
              checksums=dict(train=None, test=None, extra=None)),
    stl10=dict(loader=_load_stl10,
               checksums=dict(train=None, test=None)),
)
if __name__ == '__main__':
    # Dataset names may be passed on the command line; default to all.
    if len(sys.argv[1:]):
        subset = set(sys.argv[1:])
    else:
        subset = set(CONFIGS.keys())
    # The file already relies on Python 3 (`exist_ok` in _save_files),
    # so use it here too instead of a try/except OSError dance.
    os.makedirs(DATA_DIR, exist_ok=True)
    for name, config in CONFIGS.items():
        if name not in subset:
            continue
        # A config may supply a custom installation check.
        if 'is_installed' in config:
            if config['is_installed']():
                print('Skipping already installed:', name)
                continue
        elif _is_installed(name, config['checksums']):
            print('Skipping already installed:', name)
            continue
        print('Preparing', name)
        datas = config['loader']()
        saver = config.get('saver', _save_as_tfrecord)
        for sub_name, data in datas.items():
            if sub_name == 'readme':
                filename = os.path.join(DATA_DIR, '%s-%s.txt' % (name, sub_name))
                with open(filename, 'w') as f:
                    f.write(data)
            elif sub_name == 'files':
                for file_and_data in data:
                    path = os.path.join(DATA_DIR, file_and_data.filename)
                    # BUG FIX: close the handle deterministically instead of
                    # relying on garbage collection.
                    with open(path, "wb") as f:
                        f.write(file_and_data.data)
            else:
                saver(data, '%s-%s' % (name, sub_name))
| |
import json
from collections import deque
from ca_entities import ALL_ENTITIES
from ca_base_entity import VoidEntity, ENTITIES_NAMES, ENTITIES_IDS, Point, NEIGHBORHOOD_TYPES
from ca_link import LINK_TYPE_NAMES, LINK_TYPE_IDS
from utils import rotate_point, debug, Point
from collections import defaultdict
from six import add_metaclass
from os import path
class BaseGrid(defaultdict):
    """The base object to memorize all entities

    Behaves like a defaultdict, except that a missing key is filled in
    with the factory applied to a fresh VoidEntity's type.
    """
    def __missing__(self, key):
        if self.default_factory is None:
            raise KeyError(key)
        # Materialize (and store) the void-entity type for this key.
        value = self.default_factory(VoidEntity().type)
        self[key] = value
        return value
class HistoryExtension(object):
    """Extension for CellularGrid to manage history of actions

    Mixed into CellularGrid by HistoryMetaclass when history support is
    requested.  Relies on the host providing _actions_dicts (a deque of
    {point: entity_type} snapshots), _actions_index, _max_actions, the
    _grid mapping and the clear()/insert() methods.
    """
    def undo(self):
        """Undo last action

        Restores the snapshot just before the current index by clearing
        the grid and re-inserting every entity from that snapshot.
        """
        # DEBUG
        #debug("undo", ("actions", self._actions_dicts))
        if self._actions_index > 0:
            # DEBUG
            #debug("undo before clear", ("dict", self._grid))
            self.clear()
            # DEBUG
            #debug("undo after clear", ("dict", self._grid))
            for point, entity in self._actions_dicts[self._actions_index - 1].viewitems():
                self.insert(point, entity)
            self._actions_index -= 1
            # DEBUG
            #debug("undo exit", ("dict", self._grid))
    def redo(self):
        """Redo previous action

        Restores the snapshot just after the current index, if any.
        """
        # DEBUG
        #debug("redo", ("actions", self._actions_dicts))
        if self._actions_index + 1 < len(self._actions_dicts):
            self.clear()
            for point, entity in self._actions_dicts[self._actions_index + 1].viewitems():
                self.insert(point, entity)
            self._actions_index += 1
    def push_actions(self):
        """Add last action to the history

        Drops any redo-able snapshots beyond the current index, then
        appends a snapshot of all non-void grid entries.  When the
        history is full, the oldest snapshot is discarded instead of
        advancing the index.
        """
        # DEBUG
        #debug("push_actions", ("actions", self._actions_dicts))
        while len(self._actions_dicts) != self._actions_index + 1:
            self._actions_dicts.pop()
        if len(self._actions_dicts) <= self._max_actions:
            self._actions_dicts.append(dict((point, entity_t) for point, entity_t in self._grid.viewitems(
            ) if entity_t != ENTITIES_NAMES['void']))
            self._actions_index += 1
        else:
            self._actions_dicts.popleft()
            self._actions_dicts.append(dict((point, entity_t) for point, entity_t in self._grid.viewitems(
            ) if entity_t != ENTITIES_NAMES['void']))
    def actions_status(self):
        """Return the current len of history action and the current index

        Returns a (history_length, current_index) tuple.
        """
        return (len(self._actions_dicts), self._actions_index)
class HistoryMetaclass(type):
    """Metaclass that extend CellularGrid when
    we require an history actions manager

    Instantiation with history=True builds a new class on the fly that
    mixes HistoryExtension in; otherwise a plain variant is built.
    NOTE(review): a fresh class object is created on every call, and the
    history state is stored as class-level attributes -- confirm this is
    intentional before sharing instances of the same history class.
    """
    def __call__(cls, *args, **kwargs):
        if kwargs and kwargs.get('history', False):
            # Consume the flag so it is not forwarded to __init__.
            kwargs.pop('history')
            # Rebuild the class with HistoryExtension as its base.
            instance = type(
                cls.__name__, (HistoryExtension,), cls.__dict__.copy())
            setattr(instance, "_actions_dicts", deque())
            setattr(instance, "_actions_index", 0)
            setattr(instance, "_max_actions", 10)
            # Seed the history with an empty-grid snapshot.
            instance._actions_dicts.append(BaseGrid(int).copy())
            return instance.__call__(*args, **kwargs)
        # No history requested: rebuild on a plain object base.
        instance = type(cls.__name__, (object,), cls.__dict__.copy())
        return instance.__call__(*args, **kwargs)
@add_metaclass(HistoryMetaclass)
class CellularGrid(object):
"""Object that manage the grid and his entities
"""
    def __init__(self):
        """Set up an empty grid with no selection, links or linked grids."""
        # Main entity grid and the clipboard/selection grid.
        self._grid = BaseGrid(int)
        self._grid_sel = BaseGrid(int)
        # Other grids linked to this one, keyed by id(), plus their names.
        self._linked_grids = dict()
        self._linked_names = dict()
        # Links to linked grids and this grid's own IN/OUT links.
        self._links = dict()
        self._my_links = dict()
        # Pending delete/insert actions.
        self.__actions = {
            'del': list(),
            'ins': list()
        }
        # Currently selected entity points, own links and foreign links.
        self.__selection_list = list()
        self.__selected_my_links = list()
        self.__selected_links = list()
        # Every point of the selection area (including empty cells).
        self.__all_selection = list()
        # Simulation speed multiplier, step counter and backing file.
        self.__speed = 1
        self.__step = -1
        self.__filename = None
##
# Speed section -----------------------------------------------------------
    def get_speed(self):
        """Returns the speed multiplicator for the current grid"""
        return self.__speed
    def set_speed(self, speed):
        """Sets the speed multiplicator for the current grid"""
        self.__speed = speed
def set_grid_speed(self, id_, speed):
"""Sets the speed multiplicator for a linked grid
"""
self._linked_grids[id_].set_speed(speed)
##
# Link section ------------------------------------------------------------
def insert_link(self, pos, type_, mylink, id_=None, id_pos=None):
"""Inserts a grid link
"""
if mylink:
if type_ == LINK_TYPE_NAMES["IN"]:
self._my_links[pos] = LINK_TYPE_NAMES["IN"]
elif type_ == LINK_TYPE_NAMES["OUT"]:
self._my_links[pos] = LINK_TYPE_NAMES["OUT"]
else:
if type_ == LINK_TYPE_NAMES["IN"]:
self._links[pos] = (LINK_TYPE_NAMES["IN"], id_, id_pos)
elif type_ == LINK_TYPE_NAMES["OUT"]:
self._links[pos] = (LINK_TYPE_NAMES["OUT"], id_, id_pos)
#debug("insert_link", ("my_links", self._my_links), ("links", self._links))
def delete_link(self, pos):
"""Deletes a link in a specific position
"""
# DEBUG
#debug("delete_link", ("cond", pos in self._my_links.viewkeys()))
if pos in self._my_links.viewkeys():
temp = self._my_links.pop(pos)
del temp
if pos in self._links.viewkeys():
temp = self._links.pop(pos)
del temp
def clear_links(self):
"""Deletes all the links from the current grid
"""
self._linked_grids.clear()
self._linked_names.clear()
self._links.clear()
self._my_links.clear()
def get_my_links_list(self):
"""Returns a list of links that belong to the current grid
with their types
"""
return [(pos, type_) for pos, type_ in self._my_links.viewitems()]
    def get_linked_grids(self):
        """Generates a list of attribute for each grid linked

        Yields (id, own-links list, display name, speed) for each grid
        registered in self._linked_grids.
        """
        for id_, grid in self._linked_grids.viewitems():
            yield (id_, grid.get_my_links_list(), self._linked_names[id_], grid.speed)
def update_linked_grids(self):
"""Returns the id and filename attribute for each linked grid
"""
for id_, grid in self._linked_grids.viewitems():
new_grid = CellularGrid()
new_grid.load(grid.filename)
old_grid = self._linked_grids.pop(id_)
del old_grid
self._linked_grids[id_] = new_grid
links_to_delete = list()
for pos, data in self._links.viewitems():
type_, id_l, id_pos_l = data
if id_l == id_:
if not (id_pos_l in new_grid._my_links):
links_to_delete.append(pos)
for pos in links_to_delete:
self.delete_link(pos)
def get_my_links(self):
"""Generates a tuple (position, type) for each link of the current grid
"""
for pos, type_ in self._my_links.viewitems():
# DEBUG
#debug("get_links", ("link", (pos, type_)))
yield (pos, type_)
def get_links(self):
"""Generates a tuple (position, type, id, position on source grid)
for each link of the linked grids
"""
for pos, data in self._links.viewitems():
type_, id_, id_pos = data
yield (pos, type_, id_, id_pos)
def insert_grid(self, filename):
"""Inserts a linked grid
"""
link_grid = CellularGrid()
link_grid.load(filename=filename)
self._linked_grids[id(link_grid)] = link_grid
self._linked_names[id(link_grid)] = path.split(filename)[-1]
# DEBUG
#debug("insert_grid", ("linked_grids", self._linked_grids))
def delete_grid(self, id_):
"""Deletes a linked grid
"""
grid = self._linked_grids.pop(id_)
del grid
name = self._linked_names.pop(id_)
del name
for key, value in self._links.items():
if value[1] == id_:
temp = self._links.pop(key)
del temp
# DEBUG
#debug("delete_grid", ("linked_grids", self._linked_grids))
##
# Transformations section ------------------------------------------------
def rotate(self, deg=90):
"""Rotates the entities selected and
update the selction list
"""
if len(self.__selection_list) == 0:
return
max_ = max(self.__all_selection)
min_ = min(self.__all_selection)
pivot = Point(
min_.x + ((max_.x - min_.x) / 2.),
min_.y + ((max_.y - min_.y) / 2.),
)
new_dict = BaseGrid(int)
new_selection = list()
new_all = list()
for point in self.__selection_list:
new_pos = rotate_point(point, pivot, deg)
new_selection.append(new_pos)
new_dict[new_pos] = ALL_ENTITIES[self._grid[point]].rotate(deg)
self.delete(point)
self.__selection_list = new_selection
self.__all_selection = list()
for point in self.__all_selection:
new_all.append(rotate_point(point, pivot, deg))
self.__all_selection = new_all
for pos, entity in self._grid.viewitems():
if pos not in new_dict:
new_dict[pos] = entity
self._grid.clear()
self._grid.update(new_dict)
self.__update_selection()
def flip_h(self):
"""Flips horizontally the entities selected
and update the selection list
"""
# DEBUG
#debug("flip_h", ("selection", self.__selection_list))
max_x = max(self.__all_selection)[0]
min_x = min(self.__all_selection)[0]
mid_x = (max_x - min_x) / 2
new_dict = BaseGrid(int)
for point in self.__selection_list:
my_x, my_y = point
if my_x > mid_x:
new_x = min_x + (max_x - my_x)
else:
new_x = max_x - (my_x - min_x)
new_pos = Point(new_x, my_y)
new_dict[new_pos] = ALL_ENTITIES[self._grid[point]].flip_h()
self.delete(point)
for pos, entity in self._grid.viewitems():
if pos not in new_dict and pos not in self.__selection_list:
new_dict[pos] = entity
self._grid.clear()
self._grid.update(new_dict)
self.__update_selection()
def flip_v(self):
"""Flips vertically the entities selected
and update the selection list
"""
max_y = max(self.__all_selection)[1]
min_y = min(self.__all_selection)[1]
mid_y = (max_y - min_y) / 2
new_dict = BaseGrid(int)
for point in self.__selection_list:
my_x, my_y = point
if my_y > mid_y:
new_y = min_y + (max_y - my_y)
else:
new_y = max_y - (my_y - min_y)
new_pos = Point(my_x, new_y)
new_dict[new_pos] = ALL_ENTITIES[self._grid[point]].flip_v()
self.delete(point)
for pos, entity in self._grid.viewitems():
if pos not in new_dict:
new_dict[pos] = entity
self._grid.clear()
self._grid.update(new_dict)
self.__update_selection()
##
# Edit section ------------------------------------------------------------
def delete_selected_entities(self, links=False):
"""Deletes all selected entities
"""
if links:
for point in self.__selected_my_links:
self.delete_link(point)
for point in self.__selected_links:
self.delete_link(point)
else:
entities = False
for point in self.__selection_list:
if not entities:
entities = not entities
self.delete(point)
return entities
def move_selected_entities(self, x, y):
"""Moves all selected entities
"""
new_selection = list()
new_all = list()
new_dict = BaseGrid(int)
for pos in self.__selection_list:
new_pos = Point(pos.x - x, pos.y - y)
# DEBUG
#debug("move_selected_entities", ("NEW POS", new_pos != pos))
if new_pos != pos:
new_selection.append(new_pos)
new_dict[new_pos] = self._grid_sel.pop(pos)
else:
return
self._grid_sel = new_dict
self.__all_selection = list()
for point in self.__all_selection:
new_all.append(Point(point.x - x, point.y - y))
self.__all_selection = new_all
# DEBUG
#debug("move_selected_entities", ("NEW SELECTION", new_selection))
if len(new_selection) != 0:
self.__selection_list = new_selection
    def move_all(self, x, y):
        """Moves the entire grid and simulates view motion

        Rebuilds every positional mapping (own links, foreign links,
        main grid, selection grid) with each key shifted by (-x, -y),
        then refreshes the selection.
        """
        # Shift this grid's own links.
        new_dict = dict()
        for pos in self._my_links.viewkeys():
            new_pos = Point(pos.x - x, pos.y - y)
            new_dict[new_pos] = self._my_links[pos]
        self._my_links.clear()
        self._my_links = new_dict
        # Shift links into linked grids.
        new_dict = dict()
        for pos in self._links.viewkeys():
            new_pos = Point(pos.x - x, pos.y - y)
            new_dict[new_pos] = self._links[pos]
        self._links.clear()
        self._links = new_dict
        # Shift the main entity grid.
        new_dict = BaseGrid(int)
        for pos, entity_t in self._grid.viewitems():
            new_pos = Point(pos.x - x, pos.y - y)
            new_dict[new_pos] = entity_t
        self._grid = new_dict
        # Shift the selection/clipboard grid.
        new_dict = BaseGrid(int)
        for pos, entity_t in self._grid_sel.viewitems():
            new_pos = Point(pos.x - x, pos.y - y)
            new_dict[new_pos] = entity_t
        self._grid_sel = new_dict
        self.__update_selection()
def load_selection(self):
"""Loads selected entities on the clipboard
"""
# DEBUG
# debug("load_selection")
for point in self.__selection_list:
# DEBUG
#debug("load_selection", ("dict point", self._grid[point]))
self._grid_sel[point] = self._grid.pop(point)
    def store_selection(self):
        """Stores the clipboard entities on the grid

        For every point of the selection area: the main-grid entity is
        deleted, then the clipboard entity (if one exists there) is
        queued for re-insertion; points with no clipboard entity get a
        void entity.  The queued entities are inserted afterwards so
        deletions cannot clobber freshly stored ones.
        """
        # DEBUG
        # debug("store_selection")
        #debug("store_selection", ("ALL", self.__all_selection))
        #debug("store_selection", ("SEL", self.__selection_list))
        #debug("store_selection", ("DICT SEL", self._grid_sel))
        entities_to_ins = list()
        for point in self.__all_selection:
            self.delete(point)
            try:
                entities_to_ins.append((point, self._grid_sel.pop(point)))
            except KeyError:
                # No clipboard entity here: fill the hole with void.
                self.insert(point, VoidEntity().type)
        for point, entity_t in entities_to_ins:
            self.insert(point, entity_t)
        # self.update_neighbors()
def clear_selection(self):
"""Clears the current selection
"""
# DEBUG
#debug("clear_selection", ("selection list", self.__selection_list))
del self.__selection_list[:]
del self.__selected_my_links[:]
del self.__selected_links[:]
#self._grid_sel = BaseGrid()
def __update_selection(self):
"""Updates the current selection
"""
if len(self.__selection_list) != 0:
self.clear_selection()
self.select_entities(self.__all_selection)
def select_entities(self, item_list):
"""Selects entities in the selection area
"""
# DEBUG
#debug("select_entities", ("Item list", item_list))
self.__all_selection = item_list
sel_list_app = self.__selection_list.append # Speedup append method
# Speedup append method
sel_list_mylinks_app = self.__selected_my_links.append
# Speedup append method
sel_list_links_app = self.__selected_links.append
entities_points = [point for point, entity_t in self._grid.viewitems(
) if entity_t != ENTITIES_NAMES["void"] and entity_t != ENTITIES_NAMES["deadcell"]]
# if len(self._grid_sel) == 0:
# DEBUG
#debug("select_entities", ("item_list", item_list))
for point in item_list:
# DEBUG
#debug("select_entities", ("POINT IN SELECTION", point not in self.__selection_list))
if point in entities_points and point not in self.__selection_list:
# DEBUG
#debug("select_entities", ("POINT IN SELECTION", point))
sel_list_app(point)
elif point in self._my_links:
sel_list_mylinks_app(point)
elif point in self._links:
sel_list_links_app(point)
##
# Get section -------------------------------------------------------------
def __getattr__(self, attr):
if attr == "filename":
return self.__filename
elif attr == "speed":
return self.__speed
def __getitem__(self, pos):
"""Access to the grid like a container (readonly)
"""
return self._grid[pos]
def get_entities_to_copy(self):
"""Return a list of (pos, type) for each entity selected
"""
return [(pos, self._grid[pos]) for pos in self.__selection_list]
def get_entities(self):
"""Generates a list of non void entities and their position
"""
for position, entity_t in self._grid.viewitems():
if entity_t != ENTITIES_NAMES["void"]:
yield (position, entity_t)
for position, entity_t in self._grid_sel.viewitems():
if entity_t != ENTITIES_NAMES["void"]:
yield (position, entity_t)
def get_selection_entities_points(self):
"""Return a list of points for non void entities
"""
return [point for point in self.__selection_list]
##
# Grid actions section ----------------------------------------------------
    def update(self):
        """Run one simulation step of the grid.

        Order of operations:
          1. push cell values through IN links into the linked grids,
          2. step every linked grid according to its relative speed,
          3. step every non-void entity of this grid,
          4. apply the queued 'del'/'ins' actions,
          5. pull cell values back through OUT links,
          6. refresh the selection and flush the action queues.
        """
        self.__step += 1
        # LINK UPDATE ----- count_step
        #self._links[pos] = (LINK_TYPE_NAMES["OUT"], id_, id_pos)
        # Check links IN: forward our cell value into the linked grid.
        in_id = LINK_TYPE_NAMES["IN"]
        for pos, data in self._links.viewitems():
            type_, id_, id_pos = data
            if type_ == in_id:
                self._linked_grids[id_].insert(id_pos, self._grid[pos])
        # Steps of linked grids, scaled by the ratio of the two speeds.
        for grid in self._linked_grids.viewvalues():
            if self.speed == grid.speed:
                grid.update()
            elif self.speed < grid.speed:
                # Faster linked grid: run it several times this step.
                step = float(self.speed) / float(grid.speed)
                cond = step
                while cond <= float(self.speed):
                    # DEBUG
                    #debug("UPDATE sub ===")
                    grid.update()
                    cond += step
            # self.speed > grid.speed:
            # NOTE(review): for the "slower linked grid" case the guard
            # compares speed against grid.speed + __step, which only
            # fires on one specific step value -- confirm this is the
            # intended throttling rule.
            elif self.speed == grid.speed + self.__step:
                # DEBUG
                #debug("UPDATE sub")
                grid.update()
        # Step of the grid
        # DEBUG
        #debug("=== UPDATE" + self.filename)
        void_id = ENTITIES_NAMES['void']
        # items() (a list in Python 2, cf. the viewitems() used above)
        # snapshots the grid so entity steps may mutate it safely.
        for pos, entity_t in self._grid.items():
            if entity_t != void_id:
                ALL_ENTITIES[entity_t].step(self, pos)
        # DEBUG
        #debug("update", ("id", id(self)), ("list", self.__actions))
        # Apply the actions queued by the entities during their step.
        for pos in self.__actions['del']:
            self.delete(pos)
        for pos, type_ in self.__actions['ins']:
            self.insert(pos, type_)
        if self.__step == self.speed - 1:
            self.__step = -1
        # Update link out: pull the linked grid's cell back into ours.
        out_id = LINK_TYPE_NAMES["OUT"]
        for pos, data in self._links.viewitems():
            type_, id_, id_pos = data
            if type_ == out_id:
                self.insert(pos, self._linked_grids[id_][id_pos])
        self.__update_selection()
        del self.__actions['del'][:]
        del self.__actions['ins'][:]
def insert_action(self, action):
"""Inserts an action that will be processed by the grid
action = tuple(command, position)
"""
com, pos = action
if com in self.__actions:
if pos not in self.__actions[com]:
self.__actions[com].append(pos)
    def insert(self, pos, type_):
        """Insert an entity of *type_* at *pos* on the grid.

        No-op when the cell already holds that type.  When the entity
        declares a neighborhood, the surrounding cells are seeded with
        its ``neighbors`` type; the centre cell is naturally skipped
        because it was just set to ``entity_t``.
        """
        # DEBUG
        #debug("insert", ("pos", pos), ("type", type_))
        entity_t = None
        if self._grid[pos] != type_:
            entity_t = ALL_ENTITIES[type_].type
        else:
            return # Entity already exist
        self._grid[pos] = entity_t
        if ALL_ENTITIES[entity_t].neighborhood is not None:
            if ALL_ENTITIES[entity_t].neighborhood == NEIGHBORHOOD_TYPES["moore"]:
                # Moore neighborhood: the 3x3 square around pos.
                # NOTE(review): the loops rebind ``pos``, shadowing the
                # argument -- harmless today, fragile if code is added
                # after these loops.
                x, y = pos
                for new_x in range(x - 1, x + 2):
                    for new_y in range(y - 1, y + 2):
                        pos = Point(new_x, new_y)
                        if self._grid[pos] != entity_t:
                            self._grid[pos] = ALL_ENTITIES[entity_t].neighbors
            elif ALL_ENTITIES[entity_t].neighborhood == NEIGHBORHOOD_TYPES["1D"]:
                # 1D neighborhood: only the left and right cells.
                x, y = pos
                for new_x in range(-1, 2, 2):
                    pos = Point(x + new_x, y)
                    if self._grid[pos] != entity_t:
                        self._grid[pos] = ALL_ENTITIES[entity_t].neighbors
def delete(self, pos):
"""Deletes an entity from the grid
"""
# DEBUG
#debug("delete", ("Delete", pos))
if pos in self._grid:
temp_type = self._grid.pop(pos)
if temp_type == ENTITIES_NAMES["livingcell"]:
self._grid[pos] = ALL_ENTITIES[ENTITIES_NAMES['deadcell']].type
else:
self._grid[pos] = VoidEntity().type
del temp_type
# DEBUG
# debug("DELETE!!!")
#debug("delete", ("void", VoidEntity().type), ("entity", self._grid[pos].type))
# debug("DELETE!!!")
    def clear(self):
        """Remove every entity from the main grid.

        Only ``_grid`` is emptied; the clipboard (``_grid_sel``), the
        links and the selection lists are left untouched.
        """
        self._grid.clear()
def clear_sparks(self):
"""Deletes all sparks on the grid
"""
for pos, entity_t in self._grid.viewitems():
if entity_t == ENTITIES_NAMES["spark"]:
self.delete(pos)
##
# Marshalling section -----------------------------------------------------
def store(self, filename="stored.cg"):
"""Saves into a file the current grid
"""
store_dict = dict()
for point, entity_t in self._grid.viewitems():
try:
store_dict[ENTITIES_IDS[entity_t]].append(point)
except KeyError:
store_dict[ENTITIES_IDS[entity_t]] = list()
store_dict[ENTITIES_IDS[entity_t]].append(point)
store_dict["my_links"] = dict()
for point, link in self._my_links.viewitems():
try:
store_dict["my_links"][LINK_TYPE_IDS[link]].append(point)
except KeyError:
store_dict["my_links"][LINK_TYPE_IDS[link]] = list()
store_dict["my_links"][LINK_TYPE_IDS[link]].append(point)
if len(self._links) > 0:
store_dict["links"] = dict()
for point, data in self._links.viewitems():
store_dict["links"]["%s,%s" % point] = data
if len(self._linked_grids) > 0:
store_dict["linked_names"] = dict()
for id_, name in self._linked_names.viewitems():
store_dict["linked_names"][str(id_)] = name
with open(filename, "w") as fp:
json.dump(store_dict, fp)
#self._linked_grids = dict()
#self._linked_names = dict()
#self._links = dict() (LINK_TYPE_NAMES["IN"], id_, id_pos)
    def load(self, filename="stored.cg"):
        """Load a grid previously saved by ``store`` from *filename*.

        Rebuilds entities, own links ("my_links") and cross-grid links
        ("links"), recursively loading any linked grid file relative to
        the directory of *filename*.
        """
        base_path = path.dirname(filename)
        #base_path = "/".join(filename.split("/")[:-1])
        # DEBUG
        #debug("load", ("base_path", base_path), ("filename", filename))
        self.__filename = filename
        with open(filename, "r") as fp:
            stored_dict = json.load(fp)
        for type_, list_ in stored_dict.viewitems():
            if type_ == "my_links":
                for sub_type, sub_list in list_.viewitems():
                    for pos in sub_list:
                        pos = Point(pos[0], pos[1])
                        if sub_type == LINK_TYPE_IDS[LINK_TYPE_NAMES["IN"]]:
                            self.insert_link(pos, LINK_TYPE_NAMES["IN"], True)
                        elif sub_type == LINK_TYPE_IDS[LINK_TYPE_NAMES["OUT"]]:
                            self.insert_link(pos, LINK_TYPE_NAMES["OUT"], True)
            # "links": {"10,6": [[-1, 36574928, [12, 5]]]}
            elif type_ == "links":
                added = dict()
                grids_container = dict()
                for point, data in list_.viewitems():
                    # Keys were serialised as "x,y" strings by store().
                    pos = map(int, point.split(","))
                    pos = Point(pos[0], pos[1])
                    # NOTE(review): this rebinds the outer loop variable
                    # ``type_``; safe today because every iteration
                    # reassigns it before use, but fragile.
                    type_, id_, id_pos = data
                    id_pos = Point(id_pos[0], id_pos[1])
                    if id_ not in added:
                        # First time this grid id is seen: load the
                        # linked grid file and key it by its new id().
                        new_grid = CellularGrid()
                        new_grid.load(
                            path.join(base_path, stored_dict["linked_names"][str(id_)]))
                        added[id_] = id(new_grid)
                        grids_container[id_] = new_grid
                        self._linked_names[added[id_]] = stored_dict[
                            "linked_names"][str(id_)]
                    self._linked_grids[added[id_]] = grids_container[id_]
                    self._links[pos] = (type_, added[id_], id_pos)
            elif type_ in ENTITIES_NAMES:
                for pos in list_:
                    pos = Point(pos[0], pos[1])
                    self.insert(pos, ENTITIES_NAMES[type_])
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import generators
'''
Test the basic functions of parsec.py.
'''
__author__ = 'He Tao, sighingnow@gmail.com'
import random
import unittest
from parsec import *
class ParsecTest(unittest.TestCase):
    '''Test the implementation of Text.Parsec. (The final test for all apis)'''
    def test_times_with_then(self):
        # times(p, 3) must consume exactly three letters before >> digit().
        parser = times(letter(), 3) >> digit()
        self.assertEqual(parser.parse('xyz1'), '1')
        self.assertRaises(ParseError, parser.parse, 'xy1')
        self.assertRaises(ParseError, parser.parse, 'xyz')
        self.assertRaises(ParseError, parser.parse, 'xyzw')
    def test_many_with_then(self):
        # many() accepts zero or more matches before the >> parser.
        parser = many(string('x')) >> string('y')
        self.assertEqual(parser.parse('y'), 'y')
        self.assertEqual(parser.parse('xy'), 'y')
        self.assertEqual(parser.parse('xxxxxy'), 'y')
    def test_times_with_min_and_max(self):
        # times(p, 2, 4) matches between 2 and 4 occurrences, greedily,
        # and leaves any extra input unconsumed.
        parser = times(letter(), 2, 4)
        self.assertEqual(parser.parse('xy'), ['x', 'y'])
        self.assertEqual(parser.parse('xyz'), ['x', 'y', 'z'])
        self.assertEqual(parser.parse('xyzw'), ['x', 'y', 'z', 'w'])
        self.assertEqual(parser.parse('xyzwv'), ['x', 'y', 'z', 'w'])
        self.assertRaises(ParseError, parser.parse, 'x')
    def test_times_with_min_and_max_and_then(self):
        # The bounded times() must stop at max so the following digit()
        # sees the right remainder.
        parser = times(letter(), 2, 4) >> digit()
        self.assertEqual(parser.parse('xy1'), '1')
        self.assertEqual(parser.parse('xyz1'), '1')
        self.assertEqual(parser.parse('xyzw1'), '1')
        self.assertRaises(ParseError, parser.parse, 'xy')
        self.assertRaises(ParseError, parser.parse, 'xyzw')
        self.assertRaises(ParseError, parser.parse, 'xyzwv1')
        self.assertRaises(ParseError, parser.parse, 'x1')
class ParsecPrimTest(unittest.TestCase):
    '''Test the implementation of Text.Parsec.Prim.'''
    def test_bind(self):
        # ``nonlocals`` dict emulates Python 3's nonlocal for the closure.
        nonlocals = {'piped': None}
        def binder(x):
            nonlocals['piped'] = x
            return string('y')
        parser = string('x').bind(binder)
        self.assertEqual(parser.parse('xy'), 'y')
        self.assertEqual(nonlocals['piped'], 'x')
        self.assertRaises(ParseError, parser.parse, 'x')
    def test_compose(self):
        # ``>>`` discards the left result and keeps the right one.
        parser = string('x') >> string('y')
        self.assertEqual(parser.parse('xy'), 'y')
        self.assertRaises(ParseError, parser.parse, 'y')
        self.assertRaises(ParseError, parser.parse, 'z')
    def test_joint(self):
        # ``+`` pairs the two results into a tuple.
        parser = string('x') + string('y')
        self.assertEqual(parser.parse('xy'), ('x', 'y'))
        self.assertRaises(ParseError, parser.parse, 'y')
        self.assertRaises(ParseError, parser.parse, 'z')
        # The right-hand generator must not even start when the left
        # side fails (lazy evaluation of the joint parser).
        nonlocals = {'changed': False}
        @generate
        def fn():
            nonlocals['changed'] = True
            yield string('y')
        parser = string('x') + fn
        self.assertRaises(ParseError, parser.parse, '1')
        self.assertEqual(nonlocals['changed'], False)
    def test_choice(self):
        # ``|`` does NOT backtrack: once 'xy' has consumed 'x', the
        # alternative 'xz' is not tried.
        parser = string('x') | string('y')
        self.assertEqual(parser.parse('x'), 'x')
        self.assertEqual(parser.parse('y'), 'y')
        self.assertRaises(ParseError, parser.parse, 'z')
        parser = string('xy') | string('xz')
        self.assertEqual(parser.parse('xy'), 'xy')
        self.assertRaises(ParseError, parser.parse, 'xz')
    def test_try_choice(self):
        # ``^`` is the backtracking choice: 'xz' succeeds here.
        parser = string('x') ^ string('y')
        self.assertEqual(parser.parse('x'), 'x')
        self.assertEqual(parser.parse('y'), 'y')
        self.assertRaises(ParseError, parser.parse, 'z')
        parser = string('xy') ^ string('xz')
        self.assertEqual(parser.parse('xy'), 'xy')
        self.assertEqual(parser.parse('xz'), 'xz')
    def test_ends_with(self):
        # ``<`` keeps the left result but still requires the right match.
        parser = string('x') < string('y')
        self.assertEqual(parser.parse('xy'), 'x')
        self.assertRaises(ParseError, parser.parse, 'xx')
    def test_parsecmap(self):
        def mapfn(p):
            return p + p
        parser = string('x').parsecmap(mapfn)
        self.assertEqual(parser.parse('x'), 'xx')
    def test_parsecapp(self):
        # parsecapp applies the mapped function to the next result.
        def genfn(p):
            return lambda c: 'fn:' + p + c + c
        parser = string('x').parsecmap(genfn).parsecapp(string('y'))
        self.assertEqual(parser.parse('xy'), 'fn:xyy')
    def test_desc(self):
        parser = string('x')
        self.assertEqual(parser.parse('x'), 'x')
        self.assertRaises(ParseError, parser.parse, 'y')
    def test_mark(self):
        # mark() wraps a result with its (row, col) start/end positions.
        parser = many(mark(many(letter())) << string("\n"))
        lines = parser.parse("asdf\nqwer\n")
        self.assertEqual(len(lines), 2)
        (start, letters, end) = lines[0]
        self.assertEqual(start, (0, 0))
        self.assertEqual(letters, ['a', 's', 'd', 'f'])
        self.assertEqual(end, (0, 4))
        (start, letters, end) = lines[1]
        self.assertEqual(start, (1, 0))
        self.assertEqual(letters, ['q', 'w', 'e', 'r'])
        self.assertEqual(end, (1, 4))
    def test_choice_with_compose(self):
        parser = (string('\\') >> string('y')) | string('z')
        self.assertEqual(parser.parse('\\y'), 'y')
        self.assertEqual(parser.parse('z'), 'z')
        self.assertRaises(ParseError, parser.parse, '\\z')
class ParsecCombinatorTest(unittest.TestCase):
    '''Test the implementation of Text.Parsec.Combinator.'''
    def test_times(self):
        parser = times(string('x'), 2, 10)
        self.assertEqual(parser.parse('xxx'), ['x', 'x', 'x'])
        self.assertRaises(ParseError, parser.parse, 'x')
        self.assertRaises(ParseError, parser.parse, 'xyyyyyyyyyyyyyyyyyyyyyy')
        # times(p, 0) consumes nothing, whatever the input.
        parser = times(letter(), 0)
        self.assertEqual(parser.parse(''), [])
        self.assertEqual(parser.parse('x'), [])
        self.assertEqual(parser.parse('xxxxx'), [])
    def test_count(self):
        # count() takes exactly n items and leaves the rest unconsumed.
        parser = count(letter(), 3)
        self.assertEqual(parser.parse('xyz'), ['x', 'y', 'z'])
        self.assertEqual(parser.parse('xyzwwwww'), ['x', 'y', 'z'])
        self.assertRaises(ParseError, parser.parse, 'xy')
    def test_optional(self):
        parser = optional(string('xx'))
        self.assertEqual(parser.parse('xx'), 'xx')
        self.assertEqual(parser.parse('xy'), None)
    def test_optional_default(self):
        parser = optional(string('xx'), 'k')
        self.assertEqual(parser.parse('xx'), 'xx')
        self.assertEqual(parser.parse('xy'), 'k')
    def test_many(self):
        parser = many(letter())
        self.assertEqual(parser.parse('x'), ['x'])
        self.assertEqual(parser.parse('xyz'), ['x', 'y', 'z'])
        self.assertEqual(parser.parse(''), [])
        self.assertEqual(parser.parse('1'), [])
    # from #28
    def test_many_many(self):
        parser = many(many(space()))
        self.assertEqual(parser.parse('    '), [[' ', ' ', ' ', ' ']])
        # spaces() always succeeds, so times() runs it exactly min times.
        parser = times(spaces(), 4, 10)
        self.assertEqual(parser.parse(''), [[], [], [], []])
        self.assertEqual(parser.parse(' '), [[' '], [], [], []])
        self.assertEqual(parser.parse('  '), [[' ', ' '], [], [], []])
    def test_many1(self):
        parser = many1(letter())
        self.assertEqual(parser.parse('x'), ['x'])
        self.assertEqual(parser.parse('xyz'), ['x', 'y', 'z'])
        self.assertRaises(ParseError, parser.parse, '')
        self.assertRaises(ParseError, parser.parse, '1')
    def test_separated(self):
        parser = separated(string('x'), string(','), 2, 4)
        self.assertEqual(parser.parse('x,x,x') , ['x', 'x', 'x'])
        self.assertEqual(parser.parse('x,x,x,'), ['x', 'x', 'x'])
        self.assertRaises(ParseError, parser.parse, 'x')
        self.assertRaises(ParseError, parser.parse, 'x,')
        self.assertRaises(ParseError, parser.parse, 'x,y,y,y,y')
        self.assertRaises(ParseError, parser.parse, 'x,y,y,y,y,')
        self.assertEqual(parser.parse('x,x,y,y' ), ['x','x'])
        self.assertEqual(parser.parse('x,x,y,y,'), ['x','x'])
        # min=0: the parser succeeds without consuming anything.
        parser = separated(letter(), string(','), 0)
        self.assertEqual(parser.parse('') , [])
        self.assertEqual(parser.parse('x') , [])
        self.assertEqual(parser.parse('x,') , [])
        self.assertEqual(parser.parse('x,x,x,x,x') , [])
        self.assertEqual(parser.parse('x,x,x,x,x,'), [])
        # see GH-48: ``end`` controls whether the trailing separator is
        # consumed (True), left (False) or optional (None).
        parser = separated(string('a'), string(','), 3, 3, end=False)
        r, rest = parser.parse_partial('a,a,a,')
        self.assertEqual(r, ['a', 'a', 'a'])
        self.assertEqual(rest, ',')
        parser = separated(string('a'), string(','), 3, 3, end=True)
        r, rest = parser.parse_partial('a,a,a,')
        self.assertEqual(r, ['a', 'a', 'a'])
        self.assertEqual(rest, '')
        parser = separated(string('a'), string(','), 3, 3, end=None)
        r, rest = parser.parse_partial('a,a,a,')
        self.assertEqual(r, ['a', 'a', 'a'])
        self.assertEqual(rest, '')
        parser = separated(string('a'), string(','), 3, 6, end=True)
        r, rest = parser.parse_partial('a,a,a,a.')
        self.assertEqual(r, ['a', 'a', 'a'])
        self.assertEqual(rest, 'a.')
        # see GH-49
        parser = separated(string('a'), string(','), 3, 6, end=False)
        r, rest = parser.parse_partial('a,a,a,')
        self.assertEqual(r, ['a', 'a', 'a'])
        self.assertEqual(rest, ',')
    def test_sepBy(self):
        parser = sepBy(letter(), string(','))
        self.assertEqual(parser.parse_strict('x') , ['x'])
        self.assertEqual(parser.parse ('x,') , ['x'])
        self.assertEqual(parser.parse_strict('x,y,z') , ['x', 'y', 'z'])
        self.assertEqual(parser.parse ('x,y,z,'), ['x', 'y', 'z'])
        self.assertEqual(parser.parse ('') , []) # nothing consumed
        self.assertEqual(parser.parse ('1'), []) # nothing consumed
        self.assertEqual(parser.parse ('1,'), []) # nothing consumed
    def test_sepBy1(self):
        parser = sepBy1(letter(), string(','))
        self.assertEqual(parser.parse_strict('x') , ['x'])
        self.assertEqual(parser.parse ('x,') , ['x'])
        self.assertEqual(parser.parse_strict('x,y,z') , ['x', 'y', 'z'])
        self.assertEqual(parser.parse ('x,y,z,'), ['x', 'y', 'z'])
        self.assertRaises(ParseError, parser.parse, (''))
        self.assertRaises(ParseError, parser.parse, ('1'))
        self.assertRaises(ParseError, parser.parse, ('1,'))
    def test_endBy(self):
        # endBy requires the separator AFTER each item.
        parser = endBy(letter(), string(','))
        self.assertEqual(parser.parse_strict('x,') , ['x'])
        self.assertEqual(parser.parse_strict('x,y,z,'), ['x', 'y', 'z'])
        self.assertEqual(parser.parse ('') , [])
        self.assertEqual(parser.parse ('1') , [])
        self.assertEqual(parser.parse ('1,') , [])
        self.assertEqual(parser.parse ('x') , [])
        self.assertEqual(parser.parse ('x,') , ['x'])
    def test_endBy1(self):
        parser = endBy1(letter(), string(','))
        self.assertRaises(ParseError, parser.parse, ('x'))
        self.assertRaises(ParseError, parser.parse_strict, ('x,y,z'))
        self.assertEqual(parser.parse_strict('x,') , ['x'])
        self.assertEqual(parser.parse_strict('x,y,z,'), ['x', 'y', 'z'])
        self.assertEqual(parser.parse('x,y,z') , ['x', 'y'])
        self.assertRaises(ParseError, parser.parse, (''))
        self.assertRaises(ParseError, parser.parse, ('1'))
        self.assertRaises(ParseError, parser.parse, ('1,'))
    def test_sepEndBy(self):
        # sepEndBy allows (but does not require) a trailing separator.
        parser = sepEndBy(letter(), string(','))
        self.assertEqual(parser.parse_strict('x') , ['x'])
        self.assertEqual(parser.parse_strict('x,') , ['x'])
        self.assertEqual(parser.parse_strict('x,y,z') , ['x', 'y', 'z'])
        self.assertEqual(parser.parse_strict('x,y,z,'), ['x', 'y', 'z'])
        self.assertEqual(parser.parse ('') , [])
        self.assertEqual(parser.parse ('1') , [])
        self.assertEqual(parser.parse ('1,') , [])
    def test_sepEndBy1(self):
        parser = sepEndBy1(letter(), string(','))
        self.assertEqual(parser.parse_strict('x') , ['x'])
        self.assertEqual(parser.parse_strict('x,') , ['x'])
        self.assertEqual(parser.parse_strict('x,y,z') , ['x', 'y', 'z'])
        self.assertEqual(parser.parse_strict('x,y,z,'), ['x', 'y', 'z'])
        self.assertRaises(ParseError, parser.parse, (''))
        self.assertRaises(ParseError, parser.parse, ('1'))
        self.assertRaises(ParseError, parser.parse, ('1,'))
    def test_excepts(self):
        # ``/`` fails the left parser when the right one would match.
        parser = (string('<') / string('=')) ^ string('<=')
        self.assertEqual(parser.parse('<'), "<")
        self.assertEqual(parser.parse('<='), "<=")
        # Without the exclusion, '<' wins on '<=' input.
        parser = string('<') ^ string('<=')
        self.assertEqual(parser.parse('<'), "<")
        self.assertEqual(parser.parse('<='), "<")
    def test_fix(self):
        # fix() ties the recursive knot for a self-referential grammar.
        @Parser
        @fix
        def bracketed_expr(recur):
            return (string("(") >> recur << string(")")) | any()
        self.assertEqual(bracketed_expr.parse("((x))"), 'x')
class ParsecCharTest(unittest.TestCase):
    """Tests for the Text.Parsec.Char primitives (string, regex)."""

    def test_string(self):
        """string() matches its exact literal and rejects anything else."""
        literal = string('x')
        self.assertEqual(literal.parse('x'), 'x')
        self.assertRaises(ParseError, literal.parse, 'y')

    def test_regex(self):
        """regex() matches text satisfying the pattern and rejects the rest."""
        digit_parser = regex(r'[0-9]')
        for text in ('1', '4'):
            self.assertEqual(digit_parser.parse(text), text)
        self.assertRaises(ParseError, digit_parser.parse, 'x')
class ParserGeneratorTest(unittest.TestCase):
    '''Test the implementation of Parser Generator.(generate)'''
    def test_generate_desc(self):
        # The description passed to @generate(...) becomes the
        # ``expected`` field of the resulting ParseError.
        description = 'expected description for fn'
        @generate(description)
        def fn():
            yield string('t')
        with self.assertRaises(ParseError) as err: fn.parse('x')
        ex = err.exception
        self.assertEqual(ex.expected, description)
        self.assertEqual(ex.text, 'x')
        self.assertEqual(ex.index, 0)
    def test_generate_backtracking(self):
        @generate
        def xy():
            yield string('x')
            yield string('y')
            assert False
        parser = xy | string('z')
        # should not finish executing xy()
        self.assertEqual(parser.parse('z'), 'z')
    def test_generate_raise(self):
        # `return` with argument inside generator is not supported in Python 2.
        # Instead, we can raise a `StopIteration` directly with the intended
        # result in generator for Python 2.
        #
        # Before Python 3.3, the `StopIteration` didn't have the `value` attribute,
        # we need to assign the attribute manually.
        #
        # See #15.
        @generate
        def xy():
            yield string('x')
            yield string('y')
            r = StopIteration('success')
            r.value = 'success' # for pre-3.3 Python
            raise r
        parser = xy
        self.assertEqual(parser.parse('xy'), 'success')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
from __future__ import print_function, unicode_literals
from unittest import TestCase
import mock
from nose.case import FunctionTestCase
from nose.plugins.skip import SkipTest
from skipnose.skipnose import SkipNose, walk_subfolders
class TestWalkSubfolders(TestCase):
    """Tests for the walk_subfolders helper."""

    @mock.patch('os.walk')
    def test_walk_subfolders(self, mock_walk):
        """
        walk_subfolders must join each directory with its subfolder
        names, in the order os.walk yields them.
        """
        walk_results = (
            ('path', ('a', 'b', 'c'), ('d', 'e', 'f')),
            ('other', ('q', 'w', 'e'), ('r', 't', 'y')),
        )
        mock_walk.return_value = iter(walk_results)
        expected = [
            'path/a',
            'path/b',
            'path/c',
            'other/q',
            'other/w',
            'other/e',
        ]
        self.assertListEqual(list(walk_subfolders('foo')), expected)
        mock_walk.assert_called_once_with('foo')
@mock.patch('skipnose.skipnose.print', mock.MagicMock(), create=True)
class TestSkipNoseConfig(TestCase):
    """
    Test class for skipnose configurations
    """
    def setUp(self):
        super(TestSkipNoseConfig, self).setUp()
        # Fresh plugin instance for every test.
        self.plugin = SkipNose()
    def test_options(self):
        """
        Test skipnose adds all configs to nose with
        correct defaults taken from environment variables.
        """
        env = {
            'NOSE_SKIPNOSE_INCLUDE': 'including',
            'NOSE_SKIPNOSE_EXCLUDE': 'excluding',
            'NOSE_SKIPNOSE': 'on',
        }
        mock_parser = mock.MagicMock()
        self.plugin.options(mock_parser, env)
        # The env values above must surface as option defaults.
        mock_parser.add_option.assert_has_calls(
            [
                mock.call('--with-skipnose',
                          action='store_true',
                          default=True,
                          dest=mock.ANY,
                          help=mock.ANY),
                mock.call('--skipnose-debug',
                          action='store_true',
                          default=False,
                          dest=mock.ANY,
                          help=mock.ANY),
                mock.call('--skipnose-include',
                          action='append',
                          default=['including'],
                          dest=mock.ANY,
                          help=mock.ANY),
                mock.call('--skipnose-exclude',
                          action='append',
                          default=['excluding'],
                          dest=mock.ANY,
                          help=mock.ANY),
            ]
        )
    @mock.patch('sys.exit')
    @mock.patch('os.path.exists')
    def test_configure(self, mock_path_exists, mock_sys_exit):
        """
        Test that configure sets class attributes correctly
        """
        mock_options = mock.MagicMock(
            skipnose_debug=mock.sentinel.debug,
            skipnose_include=['a', 'b:c'],
            skipnose_exclude=['x', 'y'],
            skipnose_skip_tests='foo.json',
        )
        mock_path_exists.return_value = True
        mock_open = mock.MagicMock()
        mock_open.return_value.__enter__.return_value.read.return_value = (
            b'{"skip_tests": ["one", "two"]}'
        )
        mock_sys_exit.side_effect = SystemExit
        with mock.patch('skipnose.skipnose.open', mock_open, create=True):
            self.plugin.configure(mock_options, None)
        self.assertTrue(self.plugin.enabled)
        self.assertEqual(self.plugin.debug, mock.sentinel.debug)
        # 'b:c' must be split into an AND-group ['b', 'c'].
        self.assertEqual(self.plugin.skipnose_include, [['a'], ['b', 'c']])
        self.assertEqual(self.plugin.skipnose_exclude, ['x', 'y'])
        self.assertEqual(self.plugin.skipnose_skip_tests, ['one', 'two'])
        mock_open.assert_called_once_with('foo.json', 'rb')
    @mock.patch('sys.exit')
    @mock.patch('os.path.exists')
    def test_configure_error(self, mock_path_exists, mock_sys_exit):
        """
        Test that configure sets class attributes correctly when
        invalid skip-tests path is provided and nose exits
        """
        mock_options = mock.MagicMock(
            skipnose_debug=mock.sentinel.debug,
            skipnose_include=['a', 'b:c'],
            skipnose_exclude=['x', 'y'],
            skipnose_skip_tests='foo.data',
        )
        mock_path_exists.return_value = False
        mock_open = mock.MagicMock()
        mock_sys_exit.side_effect = SystemExit
        with mock.patch('skipnose.skipnose.open', mock_open, create=True):
            with self.assertRaises(SystemExit):
                self.plugin.configure(mock_options, None)
        self.assertTrue(self.plugin.enabled)
        self.assertEqual(self.plugin.debug, mock.sentinel.debug)
        self.assertEqual(self.plugin.skipnose_include, [['a'], ['b', 'c']])
        self.assertEqual(self.plugin.skipnose_exclude, ['x', 'y'])
        # The missing file must leave skip_tests unset and never be opened.
        self.assertIsNone(self.plugin.skipnose_skip_tests)
        self.assertFalse(mock_open.called)
        mock_sys_exit.assert_called_once_with(1)
@mock.patch('skipnose.skipnose.print', mock.MagicMock(), create=True)
class TestSkipNose(TestCase):
    """
    Test class for skipnose functionality
    """
    def setUp(self):
        super(TestSkipNose, self).setUp()
        self.plugin = SkipNose()
        self.plugin.debug = True
        # Synthetic directory tree used by every wantDirectory test.
        self.test_paths = [
            '/test',
            '/test/bar/cat/one',
            '/test/bar/cat/one/subone',
            '/test/bar/cat/two',
            '/test/bar/dog/one',
            '/test/bar/dog/one/api',
            '/test/bar/dog/one/api/subapi',
            '/test/bar/dog/one/api/subapi/moreapi',
            '/test/bar/dog/one/api/subapi/evenmoreapi',
            '/test/bar/dog/one/api/subapi/evenmoreapi/api',
            '/test/foo',
            '/test/foo/api',
            '/test/foo/api/subapi',
            '/test/foo/api/subapi/moreapi',
            '/test/foo/api/subapi/evenmoreapi',
            '/test/foo/api/subsubapi',
            '/test/foo/api/subsubapi/toomuchapi',
            '/test/foo/nonapi',
            '/test/foo/nonapi/folderone',
            '/test/foo/nonapi/foldertwo',
            '/test/foo/nonapi/foldertwo/morestuff',
            '/test/foo/nonapi/foldertwo/toomuch',
        ]
    def _mock_walk_subfolders(self, path):
        """
        Function to be provided to mock.side_effect to replace
        walk_subfolders functionality to use test paths.
        """
        # NOTE(review): map(lambda i: i, ...) is an identity copy;
        # list(self.test_paths) would do the same.
        paths = list(map(lambda i: i, self.test_paths))
        index = paths.index(path)
        if len(paths) > index + 1:
            # All known paths below *path* in the synthetic tree.
            return filter(lambda i: i.startswith(path), paths[index:])
        else:
            return []
    def _test_paths(self, valid):
        # wantDirectory returns None ("no opinion") for allowed paths
        # and False for rejected ones.
        for path in self.test_paths:
            expected = None if path in valid else False
            actual = self.plugin.wantDirectory(path)
            self.assertEqual(actual, expected,
                             '{} != {} for {}'.format(actual, expected, path))
    @mock.patch('skipnose.skipnose.walk_subfolders')
    def test_want_directory_include(self, mock_walk_subfolders):
        """
        Test wantDirectory with include parameter
        """
        mock_walk_subfolders.side_effect = self._mock_walk_subfolders
        valid = [
            '/test',
            '/test/bar/dog/one',
            '/test/bar/dog/one/api',
            '/test/bar/dog/one/api/subapi',
            '/test/bar/dog/one/api/subapi/moreapi',
            '/test/bar/dog/one/api/subapi/evenmoreapi',
            '/test/bar/dog/one/api/subapi/evenmoreapi/api',
            '/test/foo',
            '/test/foo/api',
            '/test/foo/api/subapi',
            '/test/foo/api/subapi/moreapi',
            '/test/foo/api/subapi/evenmoreapi',
            '/test/foo/api/subsubapi',
            '/test/foo/api/subsubapi/toomuchapi',
        ]
        self.plugin.skipnose_include = [['api']]
        self._test_paths(valid)
    @mock.patch('skipnose.skipnose.walk_subfolders')
    def test_want_directory_include_multiple_or(self, mock_walk_subfolders):
        """
        Test wantDirectory with multiple include OR parameters
        """
        mock_walk_subfolders.side_effect = self._mock_walk_subfolders
        valid = [
            '/test',
            '/test/bar/dog/one',
            '/test/bar/dog/one/api',
            '/test/bar/dog/one/api/subapi',
            '/test/bar/dog/one/api/subapi/moreapi',
            '/test/bar/dog/one/api/subapi/evenmoreapi',
            '/test/bar/dog/one/api/subapi/evenmoreapi/api',
            '/test/foo',
            '/test/foo/api',
            '/test/foo/api/subapi',
            '/test/foo/api/subapi/moreapi',
            '/test/foo/api/subapi/evenmoreapi',
            '/test/foo/api/subsubapi',
            '/test/foo/api/subsubapi/toomuchapi',
            '/test/foo/nonapi',
            '/test/foo/nonapi/folderone',
            '/test/foo/nonapi/foldertwo',
            '/test/foo/nonapi/foldertwo/morestuff',
            '/test/foo/nonapi/foldertwo/toomuch',
        ]
        # One group with two names: 'api' OR 'foo' anywhere in the path.
        self.plugin.skipnose_include = [['api', 'foo']]
        self._test_paths(valid)
    @mock.patch('skipnose.skipnose.walk_subfolders')
    def test_want_directory_include_multiple_and(self, mock_walk_subfolders):
        """
        Test wantDirectory with multiple include AND parameters
        """
        mock_walk_subfolders.side_effect = self._mock_walk_subfolders
        valid = [
            '/test',
            '/test/foo',
            '/test/foo/api',
            '/test/foo/api/subapi',
            '/test/foo/api/subapi/moreapi',
            '/test/foo/api/subapi/evenmoreapi',
            '/test/foo/api/subsubapi',
            '/test/foo/api/subsubapi/toomuchapi',
        ]
        # Two groups: the path must match 'api' AND 'foo'.
        self.plugin.skipnose_include = [['api'], ['foo']]
        self._test_paths(valid)
    def test_want_directory_exclude(self):
        """
        Test wantDirectory with exclude parameter
        """
        valid = [
            '/test',
            '/test/bar/cat/one',
            '/test/bar/cat/one/subone',
            '/test/bar/cat/two',
            '/test/bar/dog/one',
            '/test/bar/dog/one/api/subapi',  # implicitly skipped by walk
            '/test/bar/dog/one/api/subapi/moreapi',  # noqa implicitly skipped by walk
            '/test/bar/dog/one/api/subapi/evenmoreapi',  # noqa implicitly skipped by walk
            '/test/foo',
            '/test/foo/api/subapi',  # implicitly skipped by walk
            '/test/foo/api/subapi/moreapi',  # implicitly skipped by walk
            '/test/foo/api/subapi/evenmoreapi',  # implicitly skipped by walk
            '/test/foo/api/subsubapi',  # implicitly skipped by walk
            '/test/foo/api/subsubapi/toomuchapi',  # implicitly skipped by walk
            '/test/foo/nonapi',
            '/test/foo/nonapi/folderone',
            '/test/foo/nonapi/foldertwo',
            '/test/foo/nonapi/foldertwo/morestuff',
            '/test/foo/nonapi/foldertwo/toomuch',
        ]
        self.plugin.skipnose_exclude = ['api']
        self._test_paths(valid)
    def test_want_directory_exclude_multiple(self):
        """
        Test wantDirectory with multiple exclude parameter
        """
        valid = [
            '/test',
            '/test/bar/cat/one',
            '/test/bar/cat/one/subone',
            '/test/bar/cat/two',
            '/test/bar/dog/one',
            '/test/bar/dog/one/api/subapi',  # implicitly skipped by walk
            '/test/bar/dog/one/api/subapi/moreapi',  # noqa implicitly skipped by walk
            '/test/bar/dog/one/api/subapi/evenmoreapi',  # noqa implicitly skipped by walk
            '/test/foo/api/subapi',  # implicitly skipped by walk
            '/test/foo/api/subapi/moreapi',  # implicitly skipped by walk
            '/test/foo/api/subapi/evenmoreapi',  # implicitly skipped by walk
            '/test/foo/api/subsubapi',  # implicitly skipped by walk
            '/test/foo/api/subsubapi/toomuchapi',  # implicitly skipped by walk
            '/test/foo/nonapi',  # implicitly skipped by walk
            '/test/foo/nonapi/folderone',  # implicitly skipped by walk
            '/test/foo/nonapi/foldertwo',  # implicitly skipped by walk
            '/test/foo/nonapi/foldertwo/morestuff',  # noqa implicitly skipped by walk
            '/test/foo/nonapi/foldertwo/toomuch',  # implicitly skipped by walk
        ]
        self.plugin.skipnose_exclude = ['api', 'foo']
        self._test_paths(valid)
    def test_start_test_no_tests_to_skip(self):
        # Without a skip list, startTest must be a no-op.
        self.plugin.skipnose_skip_tests = None
        self.assertIsNone(self.plugin.startTest(mock.Mock()))
    def test_start_test_function_test_case(self):
        # A FunctionTestCase whose module.name matches the skip list must
        # have its method replaced with one that raises SkipTest.
        self.plugin.skipnose_skip_tests = ['one', 'two', 'foo.bar']
        def test():
            """"""
        mock_test = mock.MagicMock()
        mock_test.test = mock.MagicMock(spec=FunctionTestCase)
        mock_test.test.test = mock.MagicMock(__module__='foo', __name__='bar')
        mock_test.test._testMethodName = 'method'
        mock_test.test.method = test
        self.plugin.startTest(mock_test)
        replaced_method = mock_test.test.method
        self.assertIsNot(replaced_method, test)
        self.assertTrue(callable(replaced_method))
        with self.assertRaises(SkipTest):
            replaced_method()
    def test_start_test_method_test_case(self):
        # Same replacement behaviour for a regular TestCase method,
        # matched as module.Class.method.
        self.plugin.skipnose_skip_tests = ['one', 'two', 'foo.Foo.method']
        class Foo(object):
            def method(self):
                """"""
        Foo.__module__ = 'foo'
        instance = Foo()
        test = instance.method
        mock_test = mock.MagicMock()
        mock_test.test = instance
        mock_test.test._testMethodName = 'method'
        mock_test.test.method = test
        self.plugin.startTest(mock_test)
        replaced_method = mock_test.test.method
        self.assertIsNot(replaced_method, test)
        self.assertTrue(callable(replaced_method))
        with self.assertRaises(SkipTest):
            replaced_method()
| |
from __future__ import unicode_literals
from datetime import datetime
from operator import attrgetter
from django.test import TestCase, skipUnlessDBFeature
from .models import (
CustomMembership, Employee, Event, Friendship, Group, Ingredient,
Invitation, Membership, Person, PersonSelfRefM2M, Recipe, RecipeIngredient,
Relationship,
)
class M2mThroughTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        """Create the shared people and groups fixture once per class."""
        cls.bob = Person.objects.create(name='Bob')
        cls.jim = Person.objects.create(name='Jim')
        cls.jane = Person.objects.create(name='Jane')
        cls.rock = Group.objects.create(name='Rock')
        cls.roll = Group.objects.create(name='Roll')
def test_retrieve_intermediate_items(self):
Membership.objects.create(person=self.jim, group=self.rock)
Membership.objects.create(person=self.jane, group=self.rock)
expected = ['Jane', 'Jim']
self.assertQuerysetEqual(
self.rock.members.all(),
expected,
attrgetter("name")
)
def test_get_on_intermediate_model(self):
Membership.objects.create(person=self.jane, group=self.rock)
queryset = Membership.objects.get(person=self.jane, group=self.rock)
self.assertEqual(
repr(queryset),
'<Membership: Jane is a member of Rock>'
)
def test_filter_on_intermediate_model(self):
Membership.objects.create(person=self.jim, group=self.rock)
Membership.objects.create(person=self.jane, group=self.rock)
queryset = Membership.objects.filter(group=self.rock)
expected = [
'<Membership: Jim is a member of Rock>',
'<Membership: Jane is a member of Rock>',
]
self.assertQuerysetEqual(
queryset,
expected
)
def test_cannot_use_add_on_m2m_with_intermediary_model(self):
msg = 'Cannot use add() on a ManyToManyField which specifies an intermediary model'
with self.assertRaisesMessage(AttributeError, msg):
self.rock.members.add(self.bob)
self.assertQuerysetEqual(
self.rock.members.all(),
[]
)
def test_cannot_use_create_on_m2m_with_intermediary_model(self):
msg = 'Cannot use create() on a ManyToManyField which specifies an intermediary model'
with self.assertRaisesMessage(AttributeError, msg):
self.rock.members.create(name='Annie')
self.assertQuerysetEqual(
self.rock.members.all(),
[]
)
def test_cannot_use_remove_on_m2m_with_intermediary_model(self):
Membership.objects.create(person=self.jim, group=self.rock)
msg = 'Cannot use remove() on a ManyToManyField which specifies an intermediary model'
with self.assertRaisesMessage(AttributeError, msg):
self.rock.members.remove(self.jim)
self.assertQuerysetEqual(
self.rock.members.all(),
['Jim', ],
attrgetter("name")
)
def test_cannot_use_setattr_on_m2m_with_intermediary_model(self):
msg = 'Cannot set values on a ManyToManyField which specifies an intermediary model'
members = list(Person.objects.filter(name__in=['Bob', 'Jim']))
with self.assertRaisesMessage(AttributeError, msg):
self.rock.members.set(members)
self.assertQuerysetEqual(
self.rock.members.all(),
[]
)
def test_clear_removes_all_the_m2m_relationships(self):
Membership.objects.create(person=self.jim, group=self.rock)
Membership.objects.create(person=self.jane, group=self.rock)
self.rock.members.clear()
self.assertQuerysetEqual(
self.rock.members.all(),
[]
)
def test_retrieve_reverse_intermediate_items(self):
Membership.objects.create(person=self.jim, group=self.rock)
Membership.objects.create(person=self.jim, group=self.roll)
expected = ['Rock', 'Roll']
self.assertQuerysetEqual(
self.jim.group_set.all(),
expected,
attrgetter("name")
)
def test_cannot_use_add_on_reverse_m2m_with_intermediary_model(self):
msg = 'Cannot use add() on a ManyToManyField which specifies an intermediary model'
with self.assertRaisesMessage(AttributeError, msg):
self.bob.group_set.add(self.bob)
self.assertQuerysetEqual(
self.bob.group_set.all(),
[]
)
def test_cannot_use_create_on_reverse_m2m_with_intermediary_model(self):
msg = 'Cannot use create() on a ManyToManyField which specifies an intermediary model'
with self.assertRaisesMessage(AttributeError, msg):
self.bob.group_set.create(name='Funk')
self.assertQuerysetEqual(
self.bob.group_set.all(),
[]
)
def test_cannot_use_remove_on_reverse_m2m_with_intermediary_model(self):
Membership.objects.create(person=self.bob, group=self.rock)
msg = 'Cannot use remove() on a ManyToManyField which specifies an intermediary model'
with self.assertRaisesMessage(AttributeError, msg):
self.bob.group_set.remove(self.rock)
self.assertQuerysetEqual(
self.bob.group_set.all(),
['Rock', ],
attrgetter('name')
)
def test_cannot_use_setattr_on_reverse_m2m_with_intermediary_model(self):
msg = 'Cannot set values on a ManyToManyField which specifies an intermediary model'
members = list(Group.objects.filter(name__in=['Rock', 'Roll']))
with self.assertRaisesMessage(AttributeError, msg):
self.bob.group_set.set(members)
self.assertQuerysetEqual(
self.bob.group_set.all(),
[]
)
def test_clear_on_reverse_removes_all_the_m2m_relationships(self):
Membership.objects.create(person=self.jim, group=self.rock)
Membership.objects.create(person=self.jim, group=self.roll)
self.jim.group_set.clear()
self.assertQuerysetEqual(
self.jim.group_set.all(),
[]
)
def test_query_model_by_attribute_name_of_related_model(self):
Membership.objects.create(person=self.jim, group=self.rock)
Membership.objects.create(person=self.jane, group=self.rock)
Membership.objects.create(person=self.bob, group=self.roll)
Membership.objects.create(person=self.jim, group=self.roll)
Membership.objects.create(person=self.jane, group=self.roll)
self.assertQuerysetEqual(
Group.objects.filter(members__name='Bob'),
['Roll', ],
attrgetter("name")
)
@skipUnlessDBFeature('supports_microsecond_precision')
def test_order_by_relational_field_through_model(self):
CustomMembership.objects.create(person=self.jim, group=self.rock)
CustomMembership.objects.create(person=self.bob, group=self.rock)
CustomMembership.objects.create(person=self.jane, group=self.roll)
CustomMembership.objects.create(person=self.jim, group=self.roll)
self.assertSequenceEqual(
self.rock.custom_members.order_by('custom_person_related_name'),
[self.jim, self.bob]
)
self.assertSequenceEqual(
self.roll.custom_members.order_by('custom_person_related_name'),
[self.jane, self.jim]
)
def test_query_first_model_by_intermediate_model_attribute(self):
Membership.objects.create(
person=self.jane, group=self.roll,
invite_reason="She was just awesome."
)
Membership.objects.create(
person=self.jim, group=self.roll,
invite_reason="He is good."
)
Membership.objects.create(person=self.bob, group=self.roll)
qs = Group.objects.filter(
membership__invite_reason="She was just awesome."
)
self.assertQuerysetEqual(
qs,
['Roll'],
attrgetter("name")
)
def test_query_second_model_by_intermediate_model_attribute(self):
Membership.objects.create(
person=self.jane, group=self.roll,
invite_reason="She was just awesome."
)
Membership.objects.create(
person=self.jim, group=self.roll,
invite_reason="He is good."
)
Membership.objects.create(person=self.bob, group=self.roll)
qs = Person.objects.filter(
membership__invite_reason="She was just awesome."
)
self.assertQuerysetEqual(
qs,
['Jane'],
attrgetter("name")
)
def test_query_model_by_related_model_name(self):
Membership.objects.create(person=self.jim, group=self.rock)
Membership.objects.create(person=self.jane, group=self.rock)
Membership.objects.create(person=self.bob, group=self.roll)
Membership.objects.create(person=self.jim, group=self.roll)
Membership.objects.create(person=self.jane, group=self.roll)
self.assertQuerysetEqual(
Person.objects.filter(group__name="Rock"),
['Jane', 'Jim'],
attrgetter("name")
)
def test_query_model_by_custom_related_name(self):
CustomMembership.objects.create(person=self.bob, group=self.rock)
CustomMembership.objects.create(person=self.jim, group=self.rock)
self.assertQuerysetEqual(
Person.objects.filter(custom__name="Rock"),
['Bob', 'Jim'],
attrgetter("name")
)
def test_query_model_by_intermediate_can_return_non_unique_queryset(self):
Membership.objects.create(person=self.jim, group=self.rock)
Membership.objects.create(
person=self.jane, group=self.rock,
date_joined=datetime(2006, 1, 1)
)
Membership.objects.create(
person=self.bob, group=self.roll,
date_joined=datetime(2004, 1, 1))
Membership.objects.create(person=self.jim, group=self.roll)
Membership.objects.create(
person=self.jane, group=self.roll,
date_joined=datetime(2004, 1, 1))
qs = Person.objects.filter(
membership__date_joined__gt=datetime(2004, 1, 1)
)
self.assertQuerysetEqual(
qs,
['Jane', 'Jim', 'Jim'],
attrgetter("name")
)
def test_custom_related_name_forward_empty_qs(self):
self.assertQuerysetEqual(
self.rock.custom_members.all(),
[]
)
def test_custom_related_name_reverse_empty_qs(self):
self.assertQuerysetEqual(
self.bob.custom.all(),
[]
)
def test_custom_related_name_forward_non_empty_qs(self):
CustomMembership.objects.create(person=self.bob, group=self.rock)
CustomMembership.objects.create(person=self.jim, group=self.rock)
self.assertQuerysetEqual(
self.rock.custom_members.all(),
['Bob', 'Jim'],
attrgetter("name")
)
def test_custom_related_name_reverse_non_empty_qs(self):
CustomMembership.objects.create(person=self.bob, group=self.rock)
CustomMembership.objects.create(person=self.jim, group=self.rock)
self.assertQuerysetEqual(
self.bob.custom.all(),
['Rock'],
attrgetter("name")
)
def test_custom_related_name_doesnt_conflict_with_fky_related_name(self):
CustomMembership.objects.create(person=self.bob, group=self.rock)
self.assertQuerysetEqual(
self.bob.custom_person_related_name.all(),
['<CustomMembership: Bob is a member of Rock>']
)
def test_through_fields(self):
"""
Tests that relations with intermediary tables with multiple FKs
to the M2M's ``to`` model are possible.
"""
event = Event.objects.create(title='Rockwhale 2014')
Invitation.objects.create(event=event, inviter=self.bob, invitee=self.jim)
Invitation.objects.create(event=event, inviter=self.bob, invitee=self.jane)
self.assertQuerysetEqual(
event.invitees.all(),
['Jane', 'Jim'],
attrgetter('name')
)
class M2mThroughReferentialTests(TestCase):
    """Self-referential m2m relations through an intermediate model
    (``PersonSelfRefM2M``/``Friendship`` and ``Employee``/``Relationship``)."""
    def test_self_referential_empty_qs(self):
        """A fresh instance has no friends."""
        tony = PersonSelfRefM2M.objects.create(name="Tony")
        self.assertQuerysetEqual(
            tony.friends.all(),
            []
        )
    def test_self_referential_non_symmetrical_first_side(self):
        """A one-way Friendship row is visible from its ``first`` side."""
        tony = PersonSelfRefM2M.objects.create(name="Tony")
        chris = PersonSelfRefM2M.objects.create(name="Chris")
        Friendship.objects.create(
            first=tony, second=chris, date_friended=datetime.now()
        )
        self.assertQuerysetEqual(
            tony.friends.all(),
            ['Chris'],
            attrgetter("name")
        )
    def test_self_referential_non_symmetrical_second_side(self):
        """The relation is non-symmetrical: the ``second`` side sees nothing."""
        tony = PersonSelfRefM2M.objects.create(name="Tony")
        chris = PersonSelfRefM2M.objects.create(name="Chris")
        Friendship.objects.create(
            first=tony, second=chris, date_friended=datetime.now()
        )
        self.assertQuerysetEqual(
            chris.friends.all(),
            []
        )
    def test_self_referential_non_symmetrical_clear_first_side(self):
        """clear() on one side does not delete the other direction's rows."""
        tony = PersonSelfRefM2M.objects.create(name="Tony")
        chris = PersonSelfRefM2M.objects.create(name="Chris")
        Friendship.objects.create(
            first=tony, second=chris, date_friended=datetime.now()
        )
        chris.friends.clear()
        self.assertQuerysetEqual(
            chris.friends.all(),
            []
        )
        # Since this isn't a symmetrical relation, Tony's friend link still exists.
        self.assertQuerysetEqual(
            tony.friends.all(),
            ['Chris'],
            attrgetter("name")
        )
    def test_self_referential_symmetrical(self):
        """Symmetry must be modelled explicitly with a row in each direction."""
        tony = PersonSelfRefM2M.objects.create(name="Tony")
        chris = PersonSelfRefM2M.objects.create(name="Chris")
        Friendship.objects.create(
            first=tony, second=chris, date_friended=datetime.now()
        )
        Friendship.objects.create(
            first=chris, second=tony, date_friended=datetime.now()
        )
        self.assertQuerysetEqual(
            tony.friends.all(),
            ['Chris'],
            attrgetter("name")
        )
        self.assertQuerysetEqual(
            chris.friends.all(),
            ['Tony'],
            attrgetter("name")
        )
    def test_through_fields_self_referential(self):
        """through_fields picks the right FK pair when the intermediate model
        has several FKs to the same model (``another`` must be ignored)."""
        john = Employee.objects.create(name='john')
        peter = Employee.objects.create(name='peter')
        mary = Employee.objects.create(name='mary')
        harry = Employee.objects.create(name='harry')
        Relationship.objects.create(source=john, target=peter, another=None)
        Relationship.objects.create(source=john, target=mary, another=None)
        Relationship.objects.create(source=john, target=harry, another=peter)
        self.assertQuerysetEqual(
            john.subordinates.all(),
            ['peter', 'mary', 'harry'],
            attrgetter('name')
        )
class M2mThroughToFieldsTests(TestCase):
    """m2m relations whose through model targets non-pk fields
    (Recipe -> RecipeIngredient -> Ingredient)."""
    @classmethod
    def setUpTestData(cls):
        """Create three ingredients and one recipe linked to all of them."""
        for iname in ('pea', 'potato', 'tomato'):
            setattr(cls, iname, Ingredient.objects.create(iname=iname))
        cls.curry = Recipe.objects.create(rname='curry')
        for ingredient in (cls.potato, cls.pea, cls.tomato):
            RecipeIngredient.objects.create(recipe=cls.curry, ingredient=ingredient)
    def test_retrieval(self):
        """The relation is traversable in both directions."""
        # Forward retrieval
        self.assertSequenceEqual(self.curry.ingredients.all(), [self.pea, self.potato, self.tomato])
        # Backward retrieval
        self.assertEqual(self.tomato.recipes.get(), self.curry)
    def test_choices(self):
        """Choice keys for the field are the target field values."""
        ingredients_field = Recipe._meta.get_field('ingredients')
        choice_keys = [key for key, _ in ingredients_field.get_choices(include_blank=False)]
        self.assertEqual(choice_keys, ['pea', 'potato', 'tomato'])
| |
from __future__ import print_function, division
import time
from copy import deepcopy
import numpy as np
from pymanopt.solvers.linesearch import LineSearchAdaptive
from pymanopt.solvers.solver import Solver
from pymanopt import tools
BetaTypes = tools.make_enum(
"BetaTypes",
"FletcherReeves PolakRibiere HestenesStiefel HagerZhang".split())
class ConjugateGradient(Solver):
    """
    Module containing conjugate gradient algorithm based on
    conjugategradient.m from the manopt MATLAB package.
    """
    def __init__(self, beta_type=BetaTypes.HestenesStiefel, orth_value=np.inf,
                 linesearch=None, *args, **kwargs):
        """
        Instantiate gradient solver class.
        Variable attributes (defaults in brackets):
            - beta_type (BetaTypes.HestenesStiefel)
                Conjugate gradient beta rule used to construct the new search
                direction
            - orth_value (numpy.inf)
                Parameter for Powell's restart strategy. An infinite
                value disables this strategy. See in code formula for
                the specific criterion used.
            - linesearch (LineSearchAdaptive)
                The linesearch method to be used.
        """
        super(ConjugateGradient, self).__init__(*args, **kwargs)
        self._beta_type = beta_type
        self._orth_value = orth_value
        if linesearch is None:
            self._linesearch = LineSearchAdaptive()
        else:
            self._linesearch = linesearch
        # Working copy used by solve(); kept separate from the template above
        # so a solve run can be re-entered with its linesearch state intact.
        self.linesearch = None
    def solve(self, problem, x=None, reuselinesearch=False):
        """
        Perform optimization using nonlinear conjugate gradient method with
        linesearch.
        This method first computes the gradient of obj w.r.t. arg, and then
        optimizes by moving in a direction that is conjugate to all previous
        search directions.
        Arguments:
            - problem
                Pymanopt problem setup using the Problem class, this must
                have a .manifold attribute specifying the manifold to optimize
                over, as well as a cost and enough information to compute
                the gradient of that cost.
            - x=None
                Optional parameter. Starting point on the manifold. If none
                then a starting point will be randomly generated.
            - reuselinesearch=False
                Whether to reuse the previous linesearch object. Allows to
                use information from a previous solve run.
        Returns:
            - x
                Local minimum of obj, or if algorithm terminated before
                convergence x will be the point at which it terminated.
        """
        man = problem.manifold
        verbosity = problem.verbosity
        objective = problem.cost
        gradient = problem.grad
        # Work on a deep copy of the template linesearch unless the caller
        # explicitly asked to continue with the previous run's state.
        if not reuselinesearch or self.linesearch is None:
            self.linesearch = deepcopy(self._linesearch)
        linesearch = self.linesearch
        # If no starting point is specified, generate one at random.
        if x is None:
            x = man.rand()
        # Initialize iteration counter and timer
        # NOTE(review): ``iter`` shadows the builtin of the same name.
        iter = 0
        stepsize = np.nan
        time0 = time.time()
        if verbosity >= 1:
            print("Optimizing...")
        if verbosity >= 2:
            print(" iter\t\t cost val\t grad. norm")
        # Calculate initial cost-related quantities.  Pgrad is the
        # preconditioned gradient; gradPgrad = <grad, Pgrad>.
        cost = objective(x)
        grad = gradient(x)
        gradnorm = man.norm(x, grad)
        Pgrad = problem.precon(x, grad)
        gradPgrad = man.inner(x, grad, Pgrad)
        # Initial descent direction is the negative gradient
        desc_dir = -Pgrad
        self._start_optlog(extraiterfields=['gradnorm'],
                           solverparams={'beta_type': self._beta_type,
                                         'orth_value': self._orth_value,
                                         'linesearcher': linesearch})
        while True:
            if verbosity >= 2:
                print("%5d\t%+.16e\t%.8e" % (iter, cost, gradnorm))
            if self._logverbosity >= 2:
                self._append_optlog(iter, x, cost, gradnorm=gradnorm)
            stop_reason = self._check_stopping_criterion(
                time0, gradnorm=gradnorm, iter=iter + 1, stepsize=stepsize)
            if stop_reason:
                if verbosity >= 1:
                    print(stop_reason)
                    print('')
                break
            # The line search algorithms require the directional derivative of
            # the cost at the current point x along the search direction.
            df0 = man.inner(x, grad, desc_dir)
            # If we didn't get a descent direction: restart, i.e., switch to
            # the negative gradient. Equivalent to resetting the CG direction
            # to a steepest descent step, which discards the past information.
            if df0 >= 0:
                # Or we switch to the negative gradient direction.
                if verbosity >= 3:
                    print("Conjugate gradient info: got an ascent direction "
                          "(df0 = %.2f), reset to the (preconditioned) "
                          "steepest descent direction." % df0)
                # Reset to negative gradient: this discards the CG memory.
                desc_dir = -Pgrad
                df0 = -gradPgrad
            # Execute line search
            stepsize, newx = linesearch.search(objective, man, x, desc_dir,
                                               cost, df0)
            # Compute the new cost-related quantities for newx
            newcost = objective(newx)
            newgrad = gradient(newx)
            newgradnorm = man.norm(newx, newgrad)
            Pnewgrad = problem.precon(newx, newgrad)
            newgradPnewgrad = man.inner(newx, newgrad, Pnewgrad)
            # Apply the CG scheme to compute the next search direction.  The
            # old gradient must be transported to newx's tangent space first.
            oldgrad = man.transp(x, newx, grad)
            orth_grads = man.inner(newx, oldgrad, Pnewgrad) / newgradPnewgrad
            # Powell's restart strategy (see page 12 of Hager and Zhang's
            # survey on conjugate gradient methods, for example)
            if abs(orth_grads) >= self._orth_value:
                beta = 0
                desc_dir = -Pnewgrad
            else:
                desc_dir = man.transp(x, newx, desc_dir)
                if self._beta_type == BetaTypes.FletcherReeves:
                    beta = newgradPnewgrad / gradPgrad
                elif self._beta_type == BetaTypes.PolakRibiere:
                    # PR+ rule: clamp beta at zero for automatic restarts.
                    diff = newgrad - oldgrad
                    ip_diff = man.inner(newx, Pnewgrad, diff)
                    beta = max(0, ip_diff / gradPgrad)
                elif self._beta_type == BetaTypes.HestenesStiefel:
                    diff = newgrad - oldgrad
                    ip_diff = man.inner(newx, Pnewgrad, diff)
                    try:
                        beta = max(0,
                                   ip_diff / man.inner(newx, diff, desc_dir))
                    # if ip_diff = man.inner(newx, diff, desc_dir) = 0
                    except ZeroDivisionError:
                        beta = 1
                elif self._beta_type == BetaTypes.HagerZhang:
                    diff = newgrad - oldgrad
                    # Poldgrad must be transported to newx like the others.
                    Poldgrad = man.transp(x, newx, Pgrad)
                    Pdiff = Pnewgrad - Poldgrad
                    deno = man.inner(newx, diff, desc_dir)
                    numo = man.inner(newx, diff, Pnewgrad)
                    numo -= (2 * man.inner(newx, diff, Pdiff) *
                             man.inner(newx, desc_dir, newgrad) / deno)
                    beta = numo / deno
                    # Robustness (see Hager-Zhang paper mentioned above)
                    desc_dir_norm = man.norm(newx, desc_dir)
                    eta_HZ = -1 / (desc_dir_norm * min(0.01, gradnorm))
                    beta = max(beta, eta_HZ)
                else:
                    types = ", ".join(
                        ["BetaTypes.%s" % t for t in BetaTypes._fields])
                    raise ValueError(
                        "Unknown beta_type %s. Should be one of %s." % (
                            self._beta_type, types))
                desc_dir = -Pnewgrad + beta * desc_dir
            # Update the necessary variables for the next iteration.
            x = newx
            cost = newcost
            grad = newgrad
            Pgrad = Pnewgrad
            gradnorm = newgradnorm
            gradPgrad = newgradPnewgrad
            iter += 1
        if self._logverbosity <= 0:
            return x
        else:
            # With logging enabled, also return the optimization log.
            self._stop_optlog(x, cost, stop_reason, time0,
                              stepsize=stepsize, gradnorm=gradnorm,
                              iter=iter)
            return x, self._optlog
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import unittest
from unittest.mock import patch
import pytest
from kubernetes.client.rest import ApiException
from airflow import DAG
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.providers.cncf.kubernetes.sensors.spark_kubernetes import SparkKubernetesSensor
from airflow.utils import db, timezone
# --- Test fixtures ----------------------------------------------------------
# Each TEST_*_APPLICATION dict mimics the JSON body of a SparkApplication
# custom resource as returned by the Kubernetes API; they differ mainly in
# the lifecycle state reported under status.applicationState.state.
# SparkApplication whose status.applicationState.state is "COMPLETED".
TEST_COMPLETED_APPLICATION = {
    "apiVersion": "sparkoperator.k8s.io/v1beta2",
    "kind": "SparkApplication",
    "metadata": {
        "creationTimestamp": "2020-02-24T07:34:22Z",
        "generation": 1,
        "labels": {"spark_flow_name": "spark-pi"},
        "name": "spark-pi-2020-02-24-1",
        "namespace": "default",
        "resourceVersion": "455577",
        "selfLink": "/apis/sparkoperator.k8s.io/v1beta2/namespaces/default/sparkapplications/spark-pi",
        "uid": "9f825516-6e1a-4af1-8967-b05661e8fb08",
    },
    "spec": {
        "driver": {
            "coreLimit": "1200m",
            "cores": 1,
            "labels": {"spark_flow_name": "spark-pi", "version": "2.4.4"},
            "memory": "512m",
            "serviceAccount": "default",
            "volumeMounts": [{"mountPath": "/tmp", "name": "test-volume"}],
        },
        "executor": {
            "cores": 1,
            "instances": 3,
            "labels": {"spark_flow_name": "spark-pi", "version": "2.4.4"},
            "memory": "512m",
            "volumeMounts": [{"mountPath": "/tmp", "name": "test-volume"}],
        },
        "image": "gcr.io/spark-operator/spark:v2.4.4",
        "imagePullPolicy": "Always",
        "mainApplicationFile": "local:///opt/spark/examples/jars/spark-examples_2.11-2.4.4.jar",
        "mainClass": "org.apache.spark.examples.SparkPi",
        "mode": "cluster",
        "restartPolicy": {"type": "Never"},
        "sparkVersion": "2.4.4",
        "type": "Scala",
        "volumes": [{"hostPath": {"path": "/tmp", "type": "Directory"}, "name": "test-volume"}],
    },
    "status": {
        "applicationState": {"state": "COMPLETED"},
        "driverInfo": {
            "podName": "spark-pi-2020-02-24-1-driver",
            "webUIAddress": "10.97.130.44:4040",
            "webUIPort": 4040,
            "webUIServiceName": "spark-pi-2020-02-24-1-ui-svc",
        },
        "executionAttempts": 1,
        "executorState": {
            "spark-pi-2020-02-24-1-1582529666227-exec-1": "FAILED",
            "spark-pi-2020-02-24-1-1582529666227-exec-2": "FAILED",
            "spark-pi-2020-02-24-1-1582529666227-exec-3": "FAILED",
        },
        "lastSubmissionAttemptTime": "2020-02-24T07:34:30Z",
        "sparkApplicationId": "spark-7bb432c422ca46f3854838c419460fec",
        "submissionAttempts": 1,
        "submissionID": "1a1f9c5e-6bdd-4824-806f-40a814c1cf43",
        "terminationTime": "2020-02-24T07:35:01Z",
    },
}
# SparkApplication in state "FAILED", including the driver error message.
TEST_FAILED_APPLICATION = {
    "apiVersion": "sparkoperator.k8s.io/v1beta2",
    "kind": "SparkApplication",
    "metadata": {
        "creationTimestamp": "2020-02-26T11:59:30Z",
        "generation": 1,
        "name": "spark-pi",
        "namespace": "default",
        "resourceVersion": "531657",
        "selfLink": "/apis/sparkoperator.k8s.io/v1beta2/namespaces/default/sparkapplications/spark-pi",
        "uid": "f507ee3a-4461-45ef-86d8-ff42e4211e7d",
    },
    "spec": {
        "arguments": ["100000"],
        "driver": {
            "coreLimit": "1200m",
            "cores": 1,
            "labels": {"version": "2.4.4"},
            "memory": "512m",
            "serviceAccount": "default",
        },
        "executor": {
            "cores": 1,
            "instances": 1,
            "labels": {"version": "2.4.4"},
            "memory": "512m",
        },
        "image": "gcr.io/spark-operator/spark:v2.4.4-gcs-prometheus",
        "imagePullPolicy": "Always",
        "mainApplicationFile": "local:///opt/spark/examples/jars/spark-examples_2.11-2.4.4.jar",
        "mainClass": "org.apache.spark.examples.SparkPi123",
        "mode": "cluster",
        "monitoring": {
            "exposeDriverMetrics": True,
            "exposeExecutorMetrics": True,
            "prometheus": {
                "jmxExporterJar": "/prometheus/jmx_prometheus_javaagent-0.11.0.jar",
                "port": 8090,
            },
        },
        "restartPolicy": {"type": "Never"},
        "sparkVersion": "2.4.4",
        "type": "Scala",
    },
    "status": {
        "applicationState": {
            "errorMessage": "driver pod failed with ExitCode: 101, Reason: Error",
            "state": "FAILED",
        },
        "driverInfo": {
            "podName": "spark-pi-driver",
            "webUIAddress": "10.108.18.168:4040",
            "webUIPort": 4040,
            "webUIServiceName": "spark-pi-ui-svc",
        },
        "executionAttempts": 1,
        "lastSubmissionAttemptTime": "2020-02-26T11:59:38Z",
        "sparkApplicationId": "spark-5fb7445d988f434cbe1e86166a0c038a",
        "submissionAttempts": 1,
        "submissionID": "26654a75-5bf6-4618-b191-0340280d2d3d",
        "terminationTime": "2020-02-26T11:59:49Z",
    },
}
# SparkApplication in state "UNKNOWN".
TEST_UNKNOWN_APPLICATION = {
    "apiVersion": "sparkoperator.k8s.io/v1beta2",
    "kind": "SparkApplication",
    "metadata": {
        "creationTimestamp": "2020-02-24T07:34:22Z",
        "generation": 1,
        "labels": {"spark_flow_name": "spark-pi"},
        "name": "spark-pi-2020-02-24-1",
        "namespace": "default",
        "resourceVersion": "455577",
        "selfLink": "/apis/sparkoperator.k8s.io/v1beta2/namespaces/default/sparkapplications/spark-pi",
        "uid": "9f825516-6e1a-4af1-8967-b05661e8fb08",
    },
    "spec": {
        "driver": {
            "coreLimit": "1200m",
            "cores": 1,
            "labels": {"spark_flow_name": "spark-pi", "version": "2.4.4"},
            "memory": "512m",
            "serviceAccount": "default",
            "volumeMounts": [{"mountPath": "/tmp", "name": "test-volume"}],
        },
        "executor": {
            "cores": 1,
            "instances": 3,
            "labels": {"spark_flow_name": "spark-pi", "version": "2.4.4"},
            "memory": "512m",
            "volumeMounts": [{"mountPath": "/tmp", "name": "test-volume"}],
        },
        "image": "gcr.io/spark-operator/spark:v2.4.4",
        "imagePullPolicy": "Always",
        "mainApplicationFile": "local:///opt/spark/examples/jars/spark-examples_2.11-2.4.4.jar",
        "mainClass": "org.apache.spark.examples.SparkPi",
        "mode": "cluster",
        "restartPolicy": {"type": "Never"},
        "sparkVersion": "2.4.4",
        "type": "Scala",
        "volumes": [{"hostPath": {"path": "/tmp", "type": "Directory"}, "name": "test-volume"}],
    },
    "status": {
        "applicationState": {"state": "UNKNOWN"},
        "driverInfo": {
            "podName": "spark-pi-2020-02-24-1-driver",
            "webUIAddress": "10.97.130.44:4040",
            "webUIPort": 4040,
            "webUIServiceName": "spark-pi-2020-02-24-1-ui-svc",
        },
        "executionAttempts": 1,
        "executorState": {
            "spark-pi-2020-02-24-1-1582529666227-exec-1": "FAILED",
            "spark-pi-2020-02-24-1-1582529666227-exec-2": "FAILED",
            "spark-pi-2020-02-24-1-1582529666227-exec-3": "FAILED",
        },
        "lastSubmissionAttemptTime": "2020-02-24T07:34:30Z",
        "sparkApplicationId": "spark-7bb432c422ca46f3854838c419460fec",
        "submissionAttempts": 1,
        "submissionID": "1a1f9c5e-6bdd-4824-806f-40a814c1cf43",
        "terminationTime": "2020-02-24T07:35:01Z",
    },
}
# SparkApplication not yet picked up by the operator: it has no "status" key.
TEST_NOT_PROCESSED_APPLICATION = {
    "apiVersion": "sparkoperator.k8s.io/v1beta2",
    "kind": "SparkApplication",
    "metadata": {
        "creationTimestamp": "2020-02-26T09:14:48Z",
        "generation": 1,
        "name": "spark-pi",
        "namespace": "default",
        "resourceVersion": "525235",
        "selfLink": "/apis/sparkoperator.k8s.io/v1beta2/namespaces/default/sparkapplications/spark-pi",
        "uid": "58da0778-fa72-4e90-8ddc-18b5e658f93d",
    },
    "spec": {
        "arguments": ["100000"],
        "driver": {
            "coreLimit": "1200m",
            "cores": 1,
            "labels": {"version": "2.4.4"},
            "memory": "512m",
            "serviceAccount": "default",
        },
        "executor": {
            "cores": 1,
            "instances": 1,
            "labels": {"version": "2.4.4"},
            "memory": "512m",
        },
        "image": "gcr.io/spark-operator/spark:v2.4.4-gcs-prometheus",
        "imagePullPolicy": "Always",
        "mainApplicationFile": "local:///opt/spark/examples/jars/spark-examples_2.11-2.4.4.jar",
        "mainClass": "org.apache.spark.examples.SparkPi",
        "mode": "cluster",
        "monitoring": {
            "exposeDriverMetrics": True,
            "exposeExecutorMetrics": True,
            "prometheus": {
                "jmxExporterJar": "/prometheus/jmx_prometheus_javaagent-0.11.0.jar",
                "port": 8090,
            },
        },
        "restartPolicy": {"type": "Never"},
        "sparkVersion": "2.4.4",
        "type": "Scala",
    },
}
# SparkApplication in state "RUNNING" (terminationTime still null).
TEST_RUNNING_APPLICATION = {
    "apiVersion": "sparkoperator.k8s.io/v1beta2",
    "kind": "SparkApplication",
    "metadata": {
        "creationTimestamp": "2020-02-26T09:11:25Z",
        "generation": 1,
        "name": "spark-pi",
        "namespace": "default",
        "resourceVersion": "525001",
        "selfLink": "/apis/sparkoperator.k8s.io/v1beta2/namespaces/default/sparkapplications/spark-pi",
        "uid": "95ff1418-eeb5-454c-b59e-9e021aa3a239",
    },
    "spec": {
        "arguments": ["100000"],
        "driver": {
            "coreLimit": "1200m",
            "cores": 1,
            "labels": {"version": "2.4.4"},
            "memory": "512m",
            "serviceAccount": "default",
        },
        "executor": {
            "cores": 1,
            "instances": 1,
            "labels": {"version": "2.4.4"},
            "memory": "512m",
        },
        "image": "gcr.io/spark-operator/spark:v2.4.4-gcs-prometheus",
        "imagePullPolicy": "Always",
        "mainApplicationFile": "local:///opt/spark/examples/jars/spark-examples_2.11-2.4.4.jar",
        "mainClass": "org.apache.spark.examples.SparkPi",
        "mode": "cluster",
        "monitoring": {
            "exposeDriverMetrics": True,
            "exposeExecutorMetrics": True,
            "prometheus": {
                "jmxExporterJar": "/prometheus/jmx_prometheus_javaagent-0.11.0.jar",
                "port": 8090,
            },
        },
        "restartPolicy": {"type": "Never"},
        "sparkVersion": "2.4.4",
        "type": "Scala",
    },
    "status": {
        "applicationState": {"state": "RUNNING"},
        "driverInfo": {
            "podName": "spark-pi-driver",
            "webUIAddress": "10.106.36.53:4040",
            "webUIPort": 4040,
            "webUIServiceName": "spark-pi-ui-svc",
        },
        "executionAttempts": 1,
        "executorState": {"spark-pi-1582708290692-exec-1": "RUNNING"},
        "lastSubmissionAttemptTime": "2020-02-26T09:11:35Z",
        "sparkApplicationId": "spark-a47a002df46448f1a8395d7dd79ba448",
        "submissionAttempts": 1,
        "submissionID": "d4f5a768-b9d1-4a79-92b0-54779124d997",
        "terminationTime": None,
    },
}
# SparkApplication in state "SUBMITTED".
TEST_SUBMITTED_APPLICATION = {
    "apiVersion": "sparkoperator.k8s.io/v1beta2",
    "kind": "SparkApplication",
    "metadata": {
        "creationTimestamp": "2020-02-26T09:16:53Z",
        "generation": 1,
        "name": "spark-pi",
        "namespace": "default",
        "resourceVersion": "525536",
        "selfLink": "/apis/sparkoperator.k8s.io/v1beta2/namespaces/default/sparkapplications/spark-pi",
        "uid": "424a682b-6e5c-40d5-8a41-164253500b58",
    },
    "spec": {
        "arguments": ["100000"],
        "driver": {
            "coreLimit": "1200m",
            "cores": 1,
            "labels": {"version": "2.4.4"},
            "memory": "512m",
            "serviceAccount": "default",
        },
        "executor": {
            "cores": 1,
            "instances": 1,
            "labels": {"version": "2.4.4"},
            "memory": "512m",
        },
        "image": "gcr.io/spark-operator/spark:v2.4.4-gcs-prometheus",
        "imagePullPolicy": "Always",
        "mainApplicationFile": "local:///opt/spark/examples/jars/spark-examples_2.11-2.4.4.jar",
        "mainClass": "org.apache.spark.examples.SparkPi",
        "mode": "cluster",
        "monitoring": {
            "exposeDriverMetrics": True,
            "exposeExecutorMetrics": True,
            "prometheus": {
                "jmxExporterJar": "/prometheus/jmx_prometheus_javaagent-0.11.0.jar",
                "port": 8090,
            },
        },
        "restartPolicy": {"type": "Never"},
        "sparkVersion": "2.4.4",
        "type": "Scala",
    },
    "status": {
        "applicationState": {"state": "SUBMITTED"},
        "driverInfo": {
            "podName": "spark-pi-driver",
            "webUIAddress": "10.108.175.17:4040",
            "webUIPort": 4040,
            "webUIServiceName": "spark-pi-ui-svc",
        },
        "executionAttempts": 1,
        "lastSubmissionAttemptTime": "2020-02-26T09:17:03Z",
        "sparkApplicationId": "spark-ae1a522d200246a99470743e880c5650",
        "submissionAttempts": 1,
        "submissionID": "f8b70b0b-3c81-403f-8c6d-e7f6c3653409",
        "terminationTime": None,
    },
}
# SparkApplication whose reported state is the empty string (brand new).
TEST_NEW_APPLICATION = {
    "apiVersion": "sparkoperator.k8s.io/v1beta2",
    "kind": "SparkApplication",
    "metadata": {
        "creationTimestamp": "2020-02-26T09:16:53Z",
        "generation": 1,
        "name": "spark-pi",
        "namespace": "default",
        "resourceVersion": "525536",
        "selfLink": "/apis/sparkoperator.k8s.io/v1beta2/namespaces/default/sparkapplications/spark-pi",
        "uid": "424a682b-6e5c-40d5-8a41-164253500b58",
    },
    "spec": {
        "arguments": ["100000"],
        "driver": {
            "coreLimit": "1200m",
            "cores": 1,
            "labels": {"version": "2.4.4"},
            "memory": "512m",
            "serviceAccount": "default",
        },
        "executor": {
            "cores": 1,
            "instances": 1,
            "labels": {"version": "2.4.4"},
            "memory": "512m",
        },
        "image": "gcr.io/spark-operator/spark:v2.4.4-gcs-prometheus",
        "imagePullPolicy": "Always",
        "mainApplicationFile": "local:///opt/spark/examples/jars/spark-examples_2.11-2.4.4.jar",
        "mainClass": "org.apache.spark.examples.SparkPi",
        "mode": "cluster",
        "monitoring": {
            "exposeDriverMetrics": True,
            "exposeExecutorMetrics": True,
            "prometheus": {
                "jmxExporterJar": "/prometheus/jmx_prometheus_javaagent-0.11.0.jar",
                "port": 8090,
            },
        },
        "restartPolicy": {"type": "Never"},
        "sparkVersion": "2.4.4",
        "type": "Scala",
    },
    "status": {"applicationState": {"state": ""}},
}
# SparkApplication in state "PENDING_RERUN" (empty driverInfo, null times).
TEST_PENDING_RERUN_APPLICATION = {
    "apiVersion": "sparkoperator.k8s.io/v1beta2",
    "kind": "SparkApplication",
    "metadata": {
        "creationTimestamp": "2020-02-27T08:03:02Z",
        "generation": 4,
        "name": "spark-pi",
        "namespace": "default",
        "resourceVersion": "552073",
        "selfLink": "/apis/sparkoperator.k8s.io/v1beta2/namespaces/default/sparkapplications/spark-pi",
        "uid": "0c93527d-4dd9-4006-b40a-1672872e8d6f",
    },
    "spec": {
        "arguments": ["100000"],
        "driver": {
            "coreLimit": "1200m",
            "cores": 1,
            "labels": {"version": "2.4.4"},
            "memory": "512m",
            "serviceAccount": "default",
        },
        "executor": {
            "cores": 1,
            "instances": 1,
            "labels": {"version": "2.4.4"},
            "memory": "512m",
        },
        "image": "gcr.io/spark-operator/spark:v2.4.4-gcs-prometheus",
        "imagePullPolicy": "Always",
        "mainApplicationFile": "local:///opt/spark/examples/jars/spark-examples_2.11-2.4.4.jar",
        "mainClass": "org.apache.spark.examples.SparkPi",
        "mode": "cluster",
        "monitoring": {
            "exposeDriverMetrics": True,
            "exposeExecutorMetrics": True,
            "prometheus": {
                "jmxExporterJar": "/prometheus/jmx_prometheus_javaagent-0.11.0.jar",
                "port": 8090,
            },
        },
        "restartPolicy": {"type": "Never"},
        "sparkVersion": "2.4.4",
        "type": "Scala",
    },
    "status": {
        "applicationState": {"state": "PENDING_RERUN"},
        "driverInfo": {},
        "lastSubmissionAttemptTime": None,
        "terminationTime": None,
    },
}
# Raw byte chunks as yielded when streaming driver pod logs, and the decoded
# text expected after joining them.
TEST_POD_LOGS = [b"LOG LINE 1\n", b"LOG LINE 2"]
TEST_POD_LOG_RESULT = "LOG LINE 1\nLOG LINE 2"
@patch("airflow.providers.cncf.kubernetes.hooks.kubernetes.KubernetesHook.get_conn")
class TestSparkKubernetesSensor(unittest.TestCase):
    """Tests for ``SparkKubernetesSensor.poke`` across CRD application states.

    The class-level patch replaces ``KubernetesHook.get_conn``, so every test
    method receives ``mock_kubernetes_hook`` (or ``mock_kube_conn``) as its
    last mock argument; per-method patches stack before it.
    """

    def setUp(self):
        # merge_conn upserts by conn_id, so each connection only needs to be
        # registered once.  (The original setUp registered kubernetes_default
        # twice — once inline and once multi-line — which was redundant.)
        db.merge_conn(
            Connection(
                conn_id="kubernetes_default",
                conn_type="kubernetes",
                extra=json.dumps({}),
            )
        )
        db.merge_conn(
            Connection(
                conn_id="kubernetes_with_namespace",
                conn_type="kubernetes",
                extra=json.dumps({"extra__kubernetes__namespace": "mock_namespace"}),
            )
        )
        args = {"owner": "airflow", "start_date": timezone.datetime(2020, 2, 1)}
        self.dag = DAG("test_dag_id", default_args=args)

    @patch(
        "kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
        return_value=TEST_COMPLETED_APPLICATION,
    )
    def test_completed_application(self, mock_get_namespaced_crd, mock_kubernetes_hook):
        """A COMPLETED application makes poke() succeed."""
        sensor = SparkKubernetesSensor(application_name="spark_pi", dag=self.dag, task_id="test_task_id")
        assert sensor.poke(None)
        mock_kubernetes_hook.assert_called_once_with()
        mock_get_namespaced_crd.assert_called_once_with(
            group="sparkoperator.k8s.io",
            name="spark_pi",
            namespace="default",
            plural="sparkapplications",
            version="v1beta2",
        )

    @patch(
        "kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
        return_value=TEST_FAILED_APPLICATION,
    )
    def test_failed_application(self, mock_get_namespaced_crd, mock_kubernetes_hook):
        """A FAILED application makes poke() raise AirflowException."""
        sensor = SparkKubernetesSensor(application_name="spark_pi", dag=self.dag, task_id="test_task_id")
        with pytest.raises(AirflowException):
            sensor.poke(None)
        mock_kubernetes_hook.assert_called_once_with()
        mock_get_namespaced_crd.assert_called_once_with(
            group="sparkoperator.k8s.io",
            name="spark_pi",
            namespace="default",
            plural="sparkapplications",
            version="v1beta2",
        )

    @patch(
        "kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
        return_value=TEST_NOT_PROCESSED_APPLICATION,
    )
    def test_not_processed_application(self, mock_get_namespaced_crd, mock_kubernetes_hook):
        """An application not yet processed by the operator keeps the sensor waiting."""
        sensor = SparkKubernetesSensor(application_name="spark_pi", dag=self.dag, task_id="test_task_id")
        assert not sensor.poke(None)
        mock_kubernetes_hook.assert_called_once_with()
        mock_get_namespaced_crd.assert_called_once_with(
            group="sparkoperator.k8s.io",
            name="spark_pi",
            namespace="default",
            plural="sparkapplications",
            version="v1beta2",
        )

    @patch(
        "kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
        return_value=TEST_NEW_APPLICATION,
    )
    def test_new_application(self, mock_get_namespaced_crd, mock_kubernetes_hook):
        """A NEW application keeps the sensor waiting."""
        sensor = SparkKubernetesSensor(application_name="spark_pi", dag=self.dag, task_id="test_task_id")
        assert not sensor.poke(None)
        mock_kubernetes_hook.assert_called_once_with()
        mock_get_namespaced_crd.assert_called_once_with(
            group="sparkoperator.k8s.io",
            name="spark_pi",
            namespace="default",
            plural="sparkapplications",
            version="v1beta2",
        )

    @patch(
        "kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
        return_value=TEST_RUNNING_APPLICATION,
    )
    def test_running_application(self, mock_get_namespaced_crd, mock_kubernetes_hook):
        """A RUNNING application keeps the sensor waiting."""
        sensor = SparkKubernetesSensor(application_name="spark_pi", dag=self.dag, task_id="test_task_id")
        assert not sensor.poke(None)
        mock_kubernetes_hook.assert_called_once_with()
        mock_get_namespaced_crd.assert_called_once_with(
            group="sparkoperator.k8s.io",
            name="spark_pi",
            namespace="default",
            plural="sparkapplications",
            version="v1beta2",
        )

    @patch(
        "kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
        return_value=TEST_SUBMITTED_APPLICATION,
    )
    def test_submitted_application(self, mock_get_namespaced_crd, mock_kubernetes_hook):
        """A SUBMITTED application keeps the sensor waiting."""
        sensor = SparkKubernetesSensor(application_name="spark_pi", dag=self.dag, task_id="test_task_id")
        assert not sensor.poke(None)
        mock_kubernetes_hook.assert_called_once_with()
        mock_get_namespaced_crd.assert_called_once_with(
            group="sparkoperator.k8s.io",
            name="spark_pi",
            namespace="default",
            plural="sparkapplications",
            version="v1beta2",
        )

    @patch(
        "kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
        return_value=TEST_PENDING_RERUN_APPLICATION,
    )
    def test_pending_rerun_application(self, mock_get_namespaced_crd, mock_kubernetes_hook):
        """A PENDING_RERUN application keeps the sensor waiting."""
        sensor = SparkKubernetesSensor(application_name="spark_pi", dag=self.dag, task_id="test_task_id")
        assert not sensor.poke(None)
        mock_kubernetes_hook.assert_called_once_with()
        mock_get_namespaced_crd.assert_called_once_with(
            group="sparkoperator.k8s.io",
            name="spark_pi",
            namespace="default",
            plural="sparkapplications",
            version="v1beta2",
        )

    @patch(
        "kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
        return_value=TEST_UNKNOWN_APPLICATION,
    )
    def test_unknown_application(self, mock_get_namespaced_crd, mock_kubernetes_hook):
        """An unknown application state makes poke() raise AirflowException."""
        sensor = SparkKubernetesSensor(application_name="spark_pi", dag=self.dag, task_id="test_task_id")
        with pytest.raises(AirflowException):
            sensor.poke(None)
        mock_kubernetes_hook.assert_called_once_with()
        mock_get_namespaced_crd.assert_called_once_with(
            group="sparkoperator.k8s.io",
            name="spark_pi",
            namespace="default",
            plural="sparkapplications",
            version="v1beta2",
        )

    @patch(
        "kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
        return_value=TEST_COMPLETED_APPLICATION,
    )
    def test_namespace_from_sensor(self, mock_get_namespaced_crd, mock_kubernetes_hook):
        """An explicit sensor namespace overrides the connection's namespace."""
        sensor = SparkKubernetesSensor(
            application_name="spark_pi",
            dag=self.dag,
            kubernetes_conn_id="kubernetes_with_namespace",
            namespace="sensor_namespace",
            task_id="test_task_id",
        )
        sensor.poke(None)
        mock_kubernetes_hook.assert_called_once_with()
        mock_get_namespaced_crd.assert_called_once_with(
            group="sparkoperator.k8s.io",
            name="spark_pi",
            namespace="sensor_namespace",
            plural="sparkapplications",
            version="v1beta2",
        )

    @patch(
        "kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
        return_value=TEST_COMPLETED_APPLICATION,
    )
    def test_api_group_and_version_from_sensor(self, mock_get_namespaced_crd, mock_kubernetes_hook):
        """Custom api_group/api_version are forwarded to the CRD lookup."""
        api_group = "sparkoperator.example.com"
        api_version = "v1alpha1"
        sensor = SparkKubernetesSensor(
            application_name="spark_pi",
            dag=self.dag,
            kubernetes_conn_id="kubernetes_with_namespace",
            task_id="test_task_id",
            api_group=api_group,
            api_version=api_version,
        )
        sensor.poke(None)
        mock_kubernetes_hook.assert_called_once_with()
        mock_get_namespaced_crd.assert_called_once_with(
            group=api_group,
            name="spark_pi",
            namespace="mock_namespace",
            plural="sparkapplications",
            version=api_version,
        )

    @patch(
        "kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
        return_value=TEST_COMPLETED_APPLICATION,
    )
    def test_namespace_from_connection(self, mock_get_namespaced_crd, mock_kubernetes_hook):
        """Without an explicit namespace, the connection's namespace is used."""
        sensor = SparkKubernetesSensor(
            application_name="spark_pi",
            dag=self.dag,
            kubernetes_conn_id="kubernetes_with_namespace",
            task_id="test_task_id",
        )
        sensor.poke(None)
        mock_kubernetes_hook.assert_called_once_with()
        mock_get_namespaced_crd.assert_called_once_with(
            group="sparkoperator.k8s.io",
            name="spark_pi",
            namespace="mock_namespace",
            plural="sparkapplications",
            version="v1beta2",
        )

    @patch(
        "kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
        return_value=TEST_FAILED_APPLICATION,
    )
    @patch("logging.Logger.error")
    @patch(
        "airflow.providers.cncf.kubernetes.hooks.kubernetes.KubernetesHook.get_pod_logs",
        return_value=TEST_POD_LOGS,
    )
    def test_driver_logging_failure(
        self, mock_log_call, error_log_call, mock_get_namespaced_crd, mock_kube_conn
    ):
        """With attach_log=True, driver logs of a failed app are logged at ERROR."""
        sensor = SparkKubernetesSensor(
            application_name="spark_pi",
            attach_log=True,
            dag=self.dag,
            task_id="test_task_id",
        )
        with pytest.raises(AirflowException):
            sensor.poke(None)
        mock_log_call.assert_called_once_with("spark-pi-driver", namespace="default")
        error_log_call.assert_called_once_with(TEST_POD_LOG_RESULT)

    @patch(
        "kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
        return_value=TEST_COMPLETED_APPLICATION,
    )
    @patch("logging.Logger.info")
    @patch(
        "airflow.providers.cncf.kubernetes.hooks.kubernetes.KubernetesHook.get_pod_logs",
        return_value=TEST_POD_LOGS,
    )
    def test_driver_logging_completed(
        self, mock_log_call, info_log_call, mock_get_namespaced_crd, mock_kube_conn
    ):
        """With attach_log=True, driver logs of a completed app are logged at INFO."""
        sensor = SparkKubernetesSensor(
            application_name="spark_pi",
            attach_log=True,
            dag=self.dag,
            task_id="test_task_id",
        )
        sensor.poke(None)
        mock_log_call.assert_called_once_with("spark-pi-2020-02-24-1-driver", namespace="default")
        # The pod-log message is the third INFO call made while poking.
        log_info_call = info_log_call.mock_calls[2]
        log_value = log_info_call[1][0]
        assert log_value == TEST_POD_LOG_RESULT

    @patch(
        "kubernetes.client.api.custom_objects_api.CustomObjectsApi.get_namespaced_custom_object",
        return_value=TEST_COMPLETED_APPLICATION,
    )
    @patch("logging.Logger.warning")
    @patch(
        "airflow.providers.cncf.kubernetes.hooks.kubernetes.KubernetesHook.get_pod_logs",
        side_effect=ApiException("Test api exception"),
    )
    def test_driver_logging_error(
        self, mock_log_call, warn_log_call, mock_get_namespaced_crd, mock_kube_conn
    ):
        """A log-fetch ApiException is downgraded to a warning, not a failure."""
        sensor = SparkKubernetesSensor(
            application_name="spark_pi",
            attach_log=True,
            dag=self.dag,
            task_id="test_task_id",
        )
        sensor.poke(None)
        warn_log_call.assert_called_once()
| |
# Copyright 1999 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Code to support writing parsers.
Classes:
AbstractParser Base class for parsers.
AbstractConsumer Base class of all Consumers.
TaggingConsumer Consumer that tags output with its event. For debugging
SGMLStrippingConsumer Consumer that strips SGML tags from output.
EventGenerator Generate Biopython Events from Martel XML output
(note that Martel is now DEPRECATED)
Functions:
safe_readline Read a line from a handle, with check for EOF.
safe_peekline Peek at next line, with check for EOF.
read_and_call Read a line from a handle and pass it to a method.
read_and_call_while Read many lines, as long as a condition is met.
read_and_call_until Read many lines, until a condition is met.
attempt_read_and_call Like read_and_call, but forgiving of errors.
is_blank_line Test whether a line is blank.
"""
import sys
import traceback
from types import *
from Bio import File
# XML support is optional: only the (deprecated) Martel-based EventGenerator
# below needs SAX.  Record availability in xml_support so the rest of the
# module can still be imported without it.
try:
    from xml.sax import handler
    xml_support = 1
except ImportError:
    sys.stderr.write("Warning: Could not import SAX for dealing with XML.\n" +
                     "This causes problems with some ParserSupport modules\n")
    xml_support = 0
class AbstractParser(object):
    """Base class for other parsers.

    Subclasses must implement :meth:`parse`; the convenience wrappers
    :meth:`parse_str` and :meth:`parse_file` are provided in terms of it.
    """
    def parse(self, handle):
        """Parse the content of the given handle (must be overridden)."""
        raise NotImplementedError("Please implement in a derived class")

    def parse_str(self, string):
        """Parse a string by wrapping it in an in-memory handle."""
        return self.parse(File.StringHandle(string))

    def parse_file(self, filename):
        """Parse the named file, closing it afterwards.

        Uses a context manager so the handle is closed even if parse()
        raises (replaces the original manual try/finally).
        """
        with open(filename) as h:
            return self.parse(h)
class AbstractConsumer(object):
    """Base class for event consumers.

    Subclasses implement methods named after the events they care about;
    any event without a matching method is silently swallowed by the
    no-op handlers below.
    """
    def _unhandled_section(self):
        pass

    def _unhandled(self, data):
        pass

    def __getattr__(self, attr):
        # Section boundaries (start_*/end_*) take no data argument; every
        # other event carries a single data payload.
        if attr.startswith('start_') or attr.startswith('end_'):
            return self._unhandled_section
        return self._unhandled
class TaggingConsumer(AbstractConsumer):
    """Consumer that labels each event and writes it to a handle.

    Handy for debugging parser event streams.
    """
    def __init__(self, handle=None, colwidth=15, maxwidth=80):
        """TaggingConsumer(handle=sys.stdout, colwidth=15, maxwidth=80)"""
        # sys.stdout is resolved here at call time, not in the signature,
        # so later reassignments of sys.stdout are honoured.
        self._handle = sys.stdout if handle is None else handle
        self._colwidth = colwidth
        self._maxwidth = maxwidth

    def unhandled_section(self):
        self._print_name('unhandled_section')

    def unhandled(self, data):
        self._print_name('unhandled', data)

    def _print_name(self, name, data=None):
        if data is None:
            # Section marker: a row of stars followed by the event name.
            self._handle.write("%s %s\n" % ("*" * self._colwidth, name))
        else:
            # Data event: left-aligned truncated tag, then the trimmed line.
            trimmed = data[:self._maxwidth - self._colwidth - 2].rstrip()
            self._handle.write(
                "%-*s: %s\n" % (self._colwidth, name[:self._colwidth], trimmed))

    def __getattr__(self, attr):
        if attr.startswith('start_') or attr.startswith('end_'):
            return lambda a=attr, s=self: s._print_name(a)
        return lambda x, a=attr, s=self: s._print_name(a, x)
class SGMLStrippingConsumer(object):
    """A consumer that strips off SGML tags.
    This is meant to be used as a decorator for other consumers.
    """
    def __init__(self, consumer):
        # NOTE(review): InstanceType comes from the module's `from types
        # import *` and only exists on Python 2 (old-style class instances);
        # on Python 3 this line raises NameError.
        if type(consumer) is not InstanceType:
            raise ValueError("consumer should be an instance")
        self._consumer = consumer
        self._prev_attr = None
        self._stripper = File.SGMLStripper()
    def _apply_clean_data(self, data):
        # Strip the SGML markup, then forward the cleaned text to whichever
        # consumer method the last __getattr__ lookup captured in _prev_attr.
        clean = self._stripper.strip(data)
        self._prev_attr(clean)
    def __getattr__(self, name):
        # NOTE(review): __getattr__ only fires when normal lookup fails, so
        # this getattr(self, name) would recurse infinitely if _prev_attr or
        # _stripper were ever missing; they are always set in __init__.
        if name in ['_prev_attr', '_stripper']:
            return getattr(self, name)
        attr = getattr(self._consumer, name)
        # If this is not a method, then return it as is.
        if type(attr) is not MethodType:
            return attr
        # If it's a section method, then return it.
        if name[:6] == 'start_' or name[:4] == 'end_':
            return attr
        # Otherwise, it's an info event, and return my method.
        self._prev_attr = attr
        return self._apply_clean_data
# only use the Event Generator if XML handling is okay
if xml_support:
    class EventGenerator(handler.ContentHandler):
        """Handler to generate events associated with a Martel parsed file.

        This acts like a normal SAX handler, and accepts XML generated by
        Martel during parsing. These events are then converted into
        'Biopython events', which can then be caught by a standard
        biopython consumer.

        Note that Martel is now DEPRECATED.
        """
        def __init__(self, consumer, interest_tags, callback_finalizer=None,
                     exempt_tags=None):
            """Initialize to begin catching and firing off events.

            Arguments:
            o consumer - The consumer that we'll send Biopython events to.

            o interest_tags - A listing of all the tags we are interested in.

            o callback_finalizer - A function to deal with the collected
            information before passing it on to the consumer. By default
            the collected information is a list of all of the lines read
            for a particular tag -- if there are multiple tags in a row
            like:

            <some_info>Spam<some_info>
            <some_info>More Spam<some_info>

            In this case the list of information would be:

            ['Spam', 'More Spam']

            This list of lines will be passed to the callback finalizer if
            it is present. Otherwise the consumer will be called with the
            list of content information.

            o exempt_tags - A listing of particular tags that are exempt from
            being processed by the callback_finalizer. This allows you to
            use a finalizer to deal with most tags, but leave those you don't
            want touched.
            """
            self._consumer = consumer
            self.interest_tags = interest_tags
            self._finalizer = callback_finalizer
            # Fix: the original used a mutable default argument ([]) for
            # exempt_tags; a None sentinel avoids sharing one list between
            # instances while keeping the same default behavior.
            self._exempt_tags = [] if exempt_tags is None else exempt_tags

            # a dictionary of content for each tag of interest
            # the information for each tag is held as a list of the lines.
            # This allows us to collect information from multiple tags
            # in a row, and return it all at once.
            self.info = {}
            for tag in self.interest_tags:
                self.info[tag] = []

            # the previous tag we were collecting information for.
            # We set a delay in sending info to the consumer so that we can
            # collect a bunch of tags in a row and append all of the info
            # together.
            self._previous_tag = ''

            # the current character information for a tag
            self._cur_content = []
            # whether we should be collecting information
            self._collect_characters = 0

        def startElement(self, name, attrs):
            """Determine if we should collect characters from this tag.
            """
            if name in self.interest_tags:
                self._collect_characters = 1

        def characters(self, content):
            """Extract the information if we are interested in it.
            """
            if self._collect_characters:
                self._cur_content.append(content)

        def endElement(self, name):
            """Send the information to the consumer.

            Once we've got the end element we've collected up all of the
            character information we need, and we need to send this on to
            the consumer to do something with it.

            We have a delay of one tag on doing this, so that we can collect
            all of the info from multiple calls to the same element at once.
            """
            # only deal with the tag if it is something we are
            # interested in and potentially have information for
            if self._collect_characters:
                # add all of the information collected inside this tag
                self.info[name].append("".join(self._cur_content))
                # reset our information and flags
                self._cur_content = []
                self._collect_characters = 0

                # if we are at a new tag, pass on the info from the last tag
                if self._previous_tag and self._previous_tag != name:
                    self._make_callback(self._previous_tag)

                # set this tag as the next to be passed
                self._previous_tag = name

        def _make_callback(self, name):
            """Call the callback function with the info with the given name.
            """
            # strip off whitespace and call the consumer
            callback_function = getattr(self._consumer, name)

            # --- pass back the information
            # if there is a finalizer, use that
            if self._finalizer is not None and name not in self._exempt_tags:
                info_to_pass = self._finalizer(self.info[name])
            # otherwise pass back the entire list of information
            else:
                info_to_pass = self.info[name]

            callback_function(info_to_pass)

            # reset the information for the tag
            self.info[name] = []

        def endDocument(self):
            """Make sure all of our information has been passed.

            This just flushes out any stored tags that need to be passed.
            """
            if self._previous_tag:
                self._make_callback(self._previous_tag)
def read_and_call(uhandle, method, **keywds):
    """read_and_call(uhandle, method[, start][, end][, contains][, blank][, has_re])

    Read one line from uhandle, validate it, and hand it to method.
    Raises ValueError if the line fails any of the checks.

    start, end, contains, blank, and has_re are optional conditions the
    line must satisfy; see _fails_conditions for their meaning.
    """
    line = safe_readline(uhandle)
    problem = _fails_conditions(line, **keywds)
    if problem is not None:
        raise ValueError(problem)
    method(line)
def read_and_call_while(uhandle, method, **keywds):
    """read_and_call_while(uhandle, method[, start][, end][, contains][, blank][, has_re]) -> number of lines

    Keep reading lines and passing them to method while the conditions
    hold.  The first non-matching line is pushed back onto the handle.
    Returns how many lines were consumed.
    """
    consumed = 0
    while True:
        line = safe_readline(uhandle)
        if _fails_conditions(line, **keywds):
            # Condition failed: un-read this line and stop.
            uhandle.saveline(line)
            return consumed
        method(line)
        consumed += 1
def read_and_call_until(uhandle, method, **keywds):
    """read_and_call_until(uhandle, method,
    start=None, end=None, contains=None, blank=None) -> number of lines

    Keep reading lines and passing them to method until a line meets the
    conditions.  That terminating line is pushed back onto the handle.
    Returns how many lines were consumed.
    """
    consumed = 0
    while True:
        line = safe_readline(uhandle)
        if _fails_conditions(line, **keywds) is None:
            # Condition met: un-read this line and stop.
            uhandle.saveline(line)
            return consumed
        method(line)
        consumed += 1
def attempt_read_and_call(uhandle, method, **keywds):
    """attempt_read_and_call(uhandle, method, **keywds) -> boolean

    Like read_and_call, but instead of raising on a failed check it
    pushes the line back and returns False; returns True when the line
    passed and was handed to method.
    """
    line = safe_readline(uhandle)
    if _fails_conditions(line, **keywds):
        uhandle.saveline(line)
        return False
    method(line)
    return True
def _fails_conditions(line, start=None, end=None, contains=None, blank=None,
has_re=None):
if start is not None:
if line[:len(start)] != start:
return "Line does not start with '%s':\n%s" % (start, line)
if end is not None:
if line.rstrip()[-len(end):] != end:
return "Line does not end with '%s':\n%s" % (end, line)
if contains is not None:
if line.find(contains) == -1:
return "Line does not contain '%s':\n%s" % (contains, line)
if blank is not None:
if blank:
if not is_blank_line(line):
return "Expected blank line, but got:\n%s" % line
else:
if is_blank_line(line):
return "Expected non-blank line, but got a blank one"
if has_re is not None:
if has_re.search(line) is None:
return "Line does not match regex '%s':\n%s" % (
has_re.pattern, line)
return None
def is_blank_line(line, allow_spaces=0):
    """is_blank_line(line, allow_spaces=0) -> boolean

    Return whether a line is blank.  With a true allow_spaces, a line of
    only whitespace and end-of-line characters also counts as blank;
    otherwise the line must be empty or begin with an EOL character.
    """
    if not line:
        return 1
    if allow_spaces:
        return line.rstrip() == ''
    return line[0] in '\n\r'
def safe_readline(handle):
    """safe_readline(handle) -> line

    Read and return one line from an UndoHandle, raising ValueError
    instead of returning an empty string at end of stream.
    """
    result = handle.readline()
    if result:
        return result
    raise ValueError("Unexpected end of stream.")
def safe_peekline(handle):
    """safe_peekline(handle) -> line

    Peek at the next line of an UndoHandle without consuming it, raising
    ValueError instead of returning an empty string at end of stream.
    """
    result = handle.peekline()
    if result:
        return result
    raise ValueError("Unexpected end of stream.")
| |
"""
Support for iptables
Configuration Options
---------------------
The following options can be set in the minion config, grains, pillar, or
master config. The configuration is read using :py:func:`config.get
<salt.modules.config.get>`.
- ``iptables.save_filters``: List of REGEX strings to FILTER OUT matching lines
This is useful for filtering out chains, rules, etc that you do not wish to
persist, such as ephemeral Docker rules.
The default is to not filter out anything.
.. code-block:: yaml
iptables.save_filters:
- "-j CATTLE_PREROUTING"
- "-j DOCKER"
- "-A POSTROUTING"
- "-A CATTLE_POSTROUTING"
- "-A FORWARD"
"""
import logging
import os
import re
import string
import sys
import uuid
import salt.utils.args
import salt.utils.files
import salt.utils.path
from salt.exceptions import SaltException
from salt.state import STATE_INTERNAL_KEYWORDS as _STATE_INTERNAL_KEYWORDS
log = logging.getLogger(__name__)
def __virtual__():
    """
    Only load the module if iptables is installed
    """
    if salt.utils.path.which("iptables"):
        return True
    return (
        False,
        "The iptables execution module cannot be loaded: iptables not installed.",
    )
def _iptables_cmd(family="ipv4"):
    """
    Return the path of the iptables binary for the given address family
    (ip6tables for ipv6, iptables otherwise).
    """
    binary = "ip6tables" if family == "ipv6" else "iptables"
    return salt.utils.path.which(binary)
def _has_option(option, family="ipv4"):
    """
    Return truth of whether iptables has `option`. For example:

    .. code-block:: python

        _has_option('--wait')
        _has_option('--check', family='ipv6')
    """
    # Probe the binary's --help output for the option string.
    help_text = __salt__["cmd.run_stdout"](
        "{} --help".format(_iptables_cmd(family)), output_loglevel="quiet"
    )
    return option in help_text
def _conf(family="ipv4"):
    """
    Return the distribution-specific location of the iptables save file.

    Raises SaltException on platforms with no known save location.
    """
    os_family = __grains__["os_family"]
    os_name = __grains__["os"]
    want_v6 = family == "ipv6"

    if os_family == "RedHat":
        return "/etc/sysconfig/ip6tables" if want_v6 else "/etc/sysconfig/iptables"
    if os_family == "Arch":
        return (
            "/etc/iptables/ip6tables.rules"
            if want_v6
            else "/etc/iptables/iptables.rules"
        )
    if os_family == "Debian":
        # This assumes the iptables-persistent package is installed
        return "/etc/iptables/rules.v6" if want_v6 else "/etc/iptables/rules.v4"
    if os_name == "Gentoo":
        return (
            "/var/lib/ip6tables/rules-save"
            if want_v6
            else "/var/lib/iptables/rules-save"
        )
    if os_family == "Suse":
        # SuSE does not seem to use separate files for IPv4 and IPv6
        return "/etc/sysconfig/scripts/SuSEfirewall2-custom"
    if os_family == "Void":
        # Void's original check is inverted: only an explicit ipv4 selects
        # the v4 file; anything else gets the v6 file.
        return (
            "/etc/iptables/iptables.rules"
            if family == "ipv4"
            else "/etc/iptables/ip6tables.rules"
        )
    if os_name == "Alpine":
        return "/etc/iptables/rules6-save" if want_v6 else "/etc/iptables/rules-save"
    if os_family == "NILinuxRT":
        return (
            "/etc/natinst/share/ip6tables.conf"
            if want_v6
            else "/etc/natinst/share/iptables.conf"
        )
    raise SaltException(
        "Saving iptables to file is not"
        + " supported on {}.".format(os_name)
        + " Please file an issue with SaltStack"
    )
def _conf_save_filters():
    """
    Return array of strings from `save_filters` in config.

    This array will be pulled from minion config, minion grains,
    minion pillar, or master config. The default value returned is [].

    .. code-block:: python

        _conf_save_filters()
    """
    return __salt__["config.option"]("iptables.save_filters", [])
def _regex_iptables_save(cmd_output, filters=None):
    """
    Return string with `save_filter` regex entries removed. For example:

    If `filters` is not provided, it will be pulled from minion config,
    minion grains, minion pillar, or master config. Default return value
    if no filters found is the original cmd_output string.

    .. code-block:: python

        _regex_iptables_save(cmd_output, ['-A DOCKER*'])
    """
    # grab RE compiled filters from context for performance
    # NOTE(review): filters are compiled only on the first call and cached in
    # __context__; later calls with a *different* `filters` argument will
    # silently keep using the first compiled set.
    if "iptables.save_filters" not in __context__:
        __context__["iptables.save_filters"] = []
        for pattern in filters or _conf_save_filters():
            try:
                __context__["iptables.save_filters"].append(re.compile(pattern))
            except re.error as e:
                # Invalid patterns are skipped (with a warning), not fatal.
                log.warning("Skipping regex rule: '%s': %s", pattern, e)
                continue

    if __context__["iptables.save_filters"]:
        # line by line get rid of any regex matches
        _filtered_cmd_output = [
            line
            for line in cmd_output.splitlines(True)
            if not any(reg.search(line) for reg in __context__["iptables.save_filters"])
        ]
        return "".join(_filtered_cmd_output)

    return cmd_output
def version(family="ipv4"):
    """
    Return version from iptables --version

    CLI Example:

    .. code-block:: bash

        salt '*' iptables.version

        IPv6:
        salt '*' iptables.version family=ipv6
    """
    # Output looks like "iptables v1.8.x ..."; the second whitespace-
    # separated token is the version string.
    output = __salt__["cmd.run_stdout"](
        "{} --version".format(_iptables_cmd(family))
    )
    return output.split()[1]
def build_rule(
table="filter",
chain=None,
command=None,
position="",
full=None,
family="ipv4",
**kwargs
):
"""
Build a well-formatted iptables rule based on kwargs. A `table` and `chain`
are not required, unless `full` is True.
If `full` is `True`, then `table`, `chain` and `command` are required.
`command` may be specified as either a short option ('I') or a long option
(`--insert`). This will return the iptables command, exactly as it would
be used from the command line.
If a position is required (as with `-I` or `-D`), it may be specified as
`position`. This will only be useful if `full` is True.
If `state` is passed, it will be ignored, use `connstate`.
If `connstate` is passed in, it will automatically be changed to `state`.
To pass in jump options that doesn't take arguments, pass in an empty
string.
.. note::
Whereas iptables will accept ``-p``, ``--proto[c[o[l]]]`` as synonyms
of ``--protocol``, if ``--proto`` appears in an iptables command after
the appearance of ``-m policy``, it is interpreted as the ``--proto``
option of the policy extension (see the iptables-extensions(8) man
page).
CLI Examples:
.. code-block:: bash
salt '*' iptables.build_rule match=state \\
connstate=RELATED,ESTABLISHED jump=ACCEPT
salt '*' iptables.build_rule filter INPUT command=I position=3 \\
full=True match=state connstate=RELATED,ESTABLISHED jump=ACCEPT
salt '*' iptables.build_rule filter INPUT command=A \\
full=True match=state connstate=RELATED,ESTABLISHED \\
source='127.0.0.1' jump=ACCEPT
.. Invert Rules
salt '*' iptables.build_rule filter INPUT command=A \\
full=True match=state connstate=RELATED,ESTABLISHED \\
source='!127.0.0.1' jump=ACCEPT
salt '*' iptables.build_rule filter INPUT command=A \\
full=True match=state connstate=RELATED,ESTABLISHED \\
destination='not 127.0.0.1' jump=ACCEPT
IPv6:
salt '*' iptables.build_rule match=state \\
connstate=RELATED,ESTABLISHED jump=ACCEPT \\
family=ipv6
salt '*' iptables.build_rule filter INPUT command=I position=3 \\
full=True match=state connstate=RELATED,ESTABLISHED jump=ACCEPT \\
family=ipv6
"""
if "target" in kwargs:
kwargs["jump"] = kwargs.pop("target")
# Ignore name and state for this function
kwargs.pop("name", None)
kwargs.pop("state", None)
for ignore in list(_STATE_INTERNAL_KEYWORDS) + ["chain", "save", "table"]:
if ignore in kwargs:
del kwargs[ignore]
rule = []
protocol = False
bang_not_pat = re.compile(r"(!|not)\s?")
def maybe_add_negation(arg):
"""
Will check if the defined argument is intended to be negated,
(i.e. prefixed with '!' or 'not'), and add a '! ' to the rule.
The prefix will be removed from the value in the kwargs dict.
"""
value = str(kwargs[arg])
if value.startswith("!") or value.startswith("not"):
kwargs[arg] = re.sub(bang_not_pat, "", value)
return "! "
return ""
if "if" in kwargs:
rule.append("{}-i {}".format(maybe_add_negation("if"), kwargs["if"]))
del kwargs["if"]
if "of" in kwargs:
rule.append("{}-o {}".format(maybe_add_negation("of"), kwargs["of"]))
del kwargs["of"]
if "proto" in kwargs and kwargs.get("match") != "policy":
kwargs["protocol"] = kwargs["proto"]
del kwargs["proto"]
# Handle the case 'proto' in kwargs and kwargs.get('match') == 'policy' below
if "protocol" in kwargs:
if not protocol:
rule.append(
"{}-p {}".format(maybe_add_negation("protocol"), kwargs["protocol"])
)
protocol = True
del kwargs["protocol"]
if "match" in kwargs:
match_value = kwargs["match"]
if not isinstance(match_value, list):
match_value = match_value.split(",")
for match in match_value:
rule.append("-m {}".format(match))
if "name_" in kwargs and match.strip() in ("pknock", "quota2", "recent"):
rule.append("--name {}".format(kwargs["name_"]))
del kwargs["name_"]
if "proto" in kwargs and kwargs.get("match") == "policy":
rule.append(
"{}--proto {}".format(maybe_add_negation("proto"), kwargs["proto"])
)
del kwargs["proto"]
del kwargs["match"]
if "match-set" in kwargs:
if isinstance(kwargs["match-set"], str):
kwargs["match-set"] = [kwargs["match-set"]]
for match_set in kwargs["match-set"]:
negative_match_set = ""
if match_set.startswith("!") or match_set.startswith("not"):
negative_match_set = "! "
match_set = re.sub(bang_not_pat, "", match_set)
rule.append("-m set {}--match-set {}".format(negative_match_set, match_set))
del kwargs["match-set"]
if "connstate" in kwargs:
if "-m state" not in rule:
rule.append("-m state")
rule.append(
"{}--state {}".format(maybe_add_negation("connstate"), kwargs["connstate"])
)
del kwargs["connstate"]
if "dport" in kwargs:
rule.append("{}--dport {}".format(maybe_add_negation("dport"), kwargs["dport"]))
del kwargs["dport"]
if "sport" in kwargs:
rule.append("{}--sport {}".format(maybe_add_negation("sport"), kwargs["sport"]))
del kwargs["sport"]
for multiport_arg in ("dports", "sports"):
if multiport_arg in kwargs:
if "-m multiport" not in rule:
rule.append("-m multiport")
if not protocol:
return "Error: protocol must be specified"
mp_value = kwargs[multiport_arg]
if isinstance(mp_value, list):
if any(
i
for i in mp_value
if str(i).startswith("!") or str(i).startswith("not")
):
mp_value = [
re.sub(bang_not_pat, "", str(item)) for item in mp_value
]
rule.append("!")
dports = ",".join(str(i) for i in mp_value)
else:
if str(mp_value).startswith("!") or str(mp_value).startswith("not"):
dports = re.sub(bang_not_pat, "", mp_value)
rule.append("!")
else:
dports = mp_value
rule.append("--{} {}".format(multiport_arg, dports))
del kwargs[multiport_arg]
if "comment" in kwargs:
if "-m comment" not in rule:
rule.append("-m comment")
rule.append('--comment "{}"'.format(kwargs["comment"]))
del kwargs["comment"]
# --set in ipset is deprecated, works but returns error.
# rewrite to --match-set if not empty, otherwise treat as recent option
if "set" in kwargs and kwargs["set"]:
rule.append("{}--match-set {}".format(maybe_add_negation("set"), kwargs["set"]))
del kwargs["set"]
# Jumps should appear last, except for any arguments that are passed to
# jumps, which of course need to follow.
after_jump = []
# All jump arguments as extracted from man iptables-extensions, man iptables,
# man xtables-addons and http://www.iptables.info/en/iptables-targets-and-jumps.html
after_jump_arguments = (
"j", # j and jump needs to be first
"jump",
# IPTABLES
"add-set",
"and-mark",
"and-tos",
"checksum-fill",
"clamp-mss-to-pmtu",
"clustermac",
"ctevents",
"ctmask",
"del-set",
"ecn-tcp-remove",
"exist",
"expevents",
"gateway",
"hash-init",
"hashmode",
"helper",
"label",
"local-node",
"log-ip-options",
"log-level",
"log-prefix",
"log-tcp-options",
"log-tcp-sequence",
"log-uid",
"mask",
"new",
"nfmask",
"nflog-group",
"nflog-prefix",
"nflog-range",
"nflog-threshold",
"nodst",
"notrack",
"on-ip",
"on-port",
"or-mark",
"or-tos",
"persistent",
"queue-balance",
"queue-bypass",
"queue-num",
"random",
"rateest-ewmalog",
"rateest-interval",
"rateest-name",
"reject-with",
"restore",
"restore-mark",
#'save', # no arg, problematic name: How do we avoid collision with this?
"save-mark",
"selctx",
"set-class",
"set-dscp",
"set-dscp-class",
"set-mark",
"set-mss",
"set-tos",
"set-xmark",
"strip-options",
"timeout",
"to",
"to-destination",
"to-ports",
"to-source",
"total-nodes",
"tproxy-mark",
"ttl-dec",
"ttl-inc",
"ttl-set",
"type",
"ulog-cprange",
"ulog-nlgroup",
"ulog-prefix",
"ulog-qthreshold",
"xor-mark",
"xor-tos",
"zone",
# IPTABLES-EXTENSIONS
"dst-pfx",
"hl-dec",
"hl-inc",
"hl-set",
"hmark-dport-mask",
"hmark-dst-prefix",
"hmark-mod",
"hmark-offset",
"hmark-proto-mask",
"hmark-rnd",
"hmark-spi-mask",
"hmark-sport-mask",
"hmark-src-prefix",
"hmark-tuple",
"led-always-blink",
"led-delay",
"led-trigger-id",
"queue-cpu-fanout",
"src-pfx",
# WEB
"to-port",
# XTABLES
"addr",
"and-mask",
"delude",
"honeypot",
"or-mask",
"prefix",
"reset",
"reuse",
"set-mac",
"shift",
"static",
"tarpit",
"tname",
"ttl",
)
for after_jump_argument in after_jump_arguments:
if after_jump_argument in kwargs:
value = kwargs[after_jump_argument]
if value in (None, ""): # options without arguments
after_jump.append("--{}".format(after_jump_argument))
elif any(ws_char in str(value) for ws_char in string.whitespace):
after_jump.append('--{} "{}"'.format(after_jump_argument, value))
else:
after_jump.append("--{} {}".format(after_jump_argument, value))
del kwargs[after_jump_argument]
for key in kwargs:
negation = maybe_add_negation(key)
# don't use .items() since maybe_add_negation removes the prefix from
# the value in the kwargs, thus we need to fetch it after that has run
value = kwargs[key]
flag = "-" if len(key) == 1 else "--"
value = "" if value in (None, "") else " {}".format(value)
rule.append("{}{}{}{}".format(negation, flag, key, value))
rule += after_jump
if full:
if not table:
return "Error: Table needs to be specified"
if not chain:
return "Error: Chain needs to be specified"
if not command:
return "Error: Command needs to be specified"
if command in "ACDIRLSFZNXPE":
flag = "-"
else:
flag = "--"
wait = "--wait" if _has_option("--wait", family) else ""
return "{} {} -t {} {}{} {} {} {}".format(
_iptables_cmd(family),
wait,
table,
flag,
command,
chain,
position,
" ".join(rule),
)
return " ".join(rule)
def get_saved_rules(conf_file=None, family="ipv4"):
    """
    Return a data structure of the rules in the conf file

    CLI Example:

    .. code-block:: bash

        salt '*' iptables.get_saved_rules

        IPv6:
        salt '*' iptables.get_saved_rules family=ipv6
    """
    # Parsing of the saved (on-disk) ruleset is delegated to the shared parser.
    saved = _parse_conf(conf_file=conf_file, family=family)
    return saved
def get_rules(family="ipv4"):
    """
    Return a data structure of the current, in-memory rules

    CLI Example:

    .. code-block:: bash

        salt '*' iptables.get_rules

        IPv6:
        salt '*' iptables.get_rules family=ipv6
    """
    # in_mem=True makes the parser read `iptables-save` output rather
    # than a configuration file.
    live = _parse_conf(in_mem=True, family=family)
    return live
def get_saved_policy(table="filter", chain=None, conf_file=None, family="ipv4"):
    """
    Return the current policy for the specified table/chain

    CLI Examples:

    .. code-block:: bash

        salt '*' iptables.get_saved_policy filter INPUT
        salt '*' iptables.get_saved_policy filter INPUT \\
            conf_file=/etc/iptables.saved

        IPv6:
        salt '*' iptables.get_saved_policy filter INPUT family=ipv6
        salt '*' iptables.get_saved_policy filter INPUT \\
            conf_file=/etc/iptables.saved family=ipv6
    """
    if not chain:
        return "Error: Chain needs to be specified"

    parsed = _parse_conf(conf_file, family=family)
    # A missing table or chain (formerly a KeyError) yields None.
    return parsed.get(table, {}).get(chain, {}).get("policy")
def get_policy(table="filter", chain=None, family="ipv4"):
    """
    Return the current policy for the specified table/chain

    CLI Example:

    .. code-block:: bash

        salt '*' iptables.get_policy filter INPUT

        IPv6:
        salt '*' iptables.get_policy filter INPUT family=ipv6
    """
    if not chain:
        return "Error: Chain needs to be specified"

    parsed = _parse_conf(in_mem=True, family=family)
    # A missing table or chain (formerly a KeyError) yields None.
    return parsed.get(table, {}).get(chain, {}).get("policy")
def set_policy(table="filter", chain=None, policy=None, family="ipv4"):
    """
    Set the current policy for the specified table/chain

    CLI Example:

    .. code-block:: bash

        salt '*' iptables.set_policy filter INPUT ACCEPT

        IPv6:
        salt '*' iptables.set_policy filter INPUT ACCEPT family=ipv6
    """
    if not chain:
        return "Error: Chain needs to be specified"
    if not policy:
        return "Error: Policy needs to be specified"

    # Newer iptables releases support --wait to serialize concurrent callers.
    wait_flag = "--wait" if _has_option("--wait", family) else ""
    policy_cmd = "{} {} -t {} -P {} {}".format(
        _iptables_cmd(family), wait_flag, table, chain, policy
    )
    return __salt__["cmd.run_stderr"](policy_cmd)
def save(filename=None, family="ipv4"):
    """
    Save the current in-memory rules to disk

    If ``filename`` is omitted, the default configuration file for this
    platform (as reported by ``_conf``) is used. Returns the result of the
    ``file.write`` call, or an error string if no filename can be determined.

    CLI Example:

    .. code-block:: bash

        salt '*' iptables.save /etc/sysconfig/iptables

        IPv6:
        salt '*' iptables.save /etc/sysconfig/iptables family=ipv6
    """
    if _conf() and not filename:
        filename = _conf(family)
    if not filename:
        # No filename supplied and no default conf file for this platform;
        # previously this crashed with a TypeError in os.path.dirname(None).
        return "Error: Filename needs to be specified"

    log.debug("Saving rules to %s", filename)

    parent_dir = os.path.dirname(filename)
    # dirname() is "" for a bare filename — makedirs("") would raise.
    # exist_ok avoids the isdir()/makedirs() check-then-act race.
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)

    cmd = "{}-save".format(_iptables_cmd(family))
    ipt = __salt__["cmd.run_stdout"](cmd)

    # regex out the output if configured with filters
    if _conf_save_filters():
        ipt = _regex_iptables_save(ipt)

    out = __salt__["file.write"](filename, ipt)
    return out
def check(table="filter", chain=None, rule=None, family="ipv4"):
    """
    Check for the existence of a rule in the table and chain

    This function accepts a rule in a standard iptables command format,
    starting with the chain. Trying to force users to adapt to a new
    method of creating rules would be irritating at best, and we
    already have a parser that can handle it.

    Returns True when the rule exists. On iptables with ``--check``
    support, a failed check returns the iptables stderr output; on older
    iptables the fallback path returns False instead.

    CLI Example:

    .. code-block:: bash

        salt '*' iptables.check filter INPUT \\
            rule='-m state --state RELATED,ESTABLISHED -j ACCEPT'

        IPv6:
        salt '*' iptables.check filter INPUT \\
            rule='-m state --state RELATED,ESTABLISHED -j ACCEPT' \\
            family=ipv6
    """
    if not chain:
        return "Error: Chain needs to be specified"
    if not rule:
        return "Error: Rule needs to be specified"
    ipt_cmd = _iptables_cmd(family)
    if _has_option("--check", family):
        # Modern iptables: -C exits silently (empty stderr) when the rule
        # exists, so `not out` below means "found".
        cmd = "{} -t {} -C {} {}".format(ipt_cmd, table, chain, rule)
        out = __salt__["cmd.run_stderr"](cmd, output_loglevel="quiet")
    else:
        # Fallback for iptables without --check: append the rule to a
        # uniquely named scratch chain (name derived from the host MAC via
        # uuid.getnode), dump the full ruleset, and look for the scratch
        # rule's canonical saved form rewritten onto the target chain.
        _chain_name = hex(uuid.getnode())
        # Create temporary table
        __salt__["cmd.run"]("{} -t {} -N {}".format(ipt_cmd, table, _chain_name))
        __salt__["cmd.run"](
            "{} -t {} -A {} {}".format(ipt_cmd, table, _chain_name, rule)
        )
        # Snapshot taken while the scratch chain still exists — it must
        # happen before the cleanup below.
        out = __salt__["cmd.run_stdout"]("{}-save".format(ipt_cmd))
        # Clean up temporary table
        __salt__["cmd.run"]("{} -t {} -F {}".format(ipt_cmd, table, _chain_name))
        __salt__["cmd.run"]("{} -t {} -X {}".format(ipt_cmd, table, _chain_name))
        for i in out.splitlines():
            if i.startswith("-A {}".format(_chain_name)):
                # Substitute the real chain name into the scratch entry; an
                # exact line match means the rule is already present.
                if i.replace(_chain_name, chain) in out.splitlines():
                    return True
        return False
    # --check path: empty stderr == rule exists; otherwise surface stderr.
    if not out:
        return True
    return out
def check_chain(table="filter", chain=None, family="ipv4"):
    """
    .. versionadded:: 2014.1.0

    Check for the existence of a chain in the table

    CLI Example:

    .. code-block:: bash

        salt '*' iptables.check_chain filter INPUT

        IPv6:
        salt '*' iptables.check_chain filter INPUT family=ipv6
    """
    if not chain:
        return "Error: Chain needs to be specified"

    # A chain appears in iptables-save output as a line beginning
    # ":<name> <policy>", so a simple substring probe suffices.
    dump_cmd = "{}-save -t {}".format(_iptables_cmd(family), table)
    ruleset = __salt__["cmd.run_stdout"](dump_cmd)
    return ruleset.find(":{} ".format(chain)) != -1
def new_chain(table="filter", chain=None, family="ipv4"):
    """
    .. versionadded:: 2014.1.0

    Create new custom chain to the specified table.

    CLI Example:

    .. code-block:: bash

        salt '*' iptables.new_chain filter CUSTOM_CHAIN

        IPv6:
        salt '*' iptables.new_chain filter CUSTOM_CHAIN family=ipv6
    """
    if not chain:
        return "Error: Chain needs to be specified"

    wait_flag = "--wait" if _has_option("--wait", family) else ""
    create_cmd = "{} {} -t {} -N {}".format(
        _iptables_cmd(family), wait_flag, table, chain
    )
    err = __salt__["cmd.run_stderr"](create_cmd)
    # Empty stderr means the chain was created; otherwise return the error.
    return err if err else True
def delete_chain(table="filter", chain=None, family="ipv4"):
    """
    .. versionadded:: 2014.1.0

    Delete custom chain to the specified table.

    CLI Example:

    .. code-block:: bash

        salt '*' iptables.delete_chain filter CUSTOM_CHAIN

        IPv6:
        salt '*' iptables.delete_chain filter CUSTOM_CHAIN family=ipv6
    """
    if not chain:
        return "Error: Chain needs to be specified"

    wait_flag = "--wait" if _has_option("--wait", family) else ""
    delete_cmd = "{} {} -t {} -X {}".format(
        _iptables_cmd(family), wait_flag, table, chain
    )
    err = __salt__["cmd.run_stderr"](delete_cmd)
    # Empty stderr means the chain was removed; otherwise return the error.
    return err if err else True
def append(table="filter", chain=None, rule=None, family="ipv4"):
    """
    Append a rule to the specified table/chain.

    This function accepts a rule in a standard iptables command format,
    starting with the chain. Trying to force users to adapt to a new
    method of creating rules would be irritating at best, and we
    already have a parser that can handle it.

    CLI Example:

    .. code-block:: bash

        salt '*' iptables.append filter INPUT \\
            rule='-m state --state RELATED,ESTABLISHED -j ACCEPT'

        IPv6:
        salt '*' iptables.append filter INPUT \\
            rule='-m state --state RELATED,ESTABLISHED -j ACCEPT' \\
            family=ipv6
    """
    if not chain:
        return "Error: Chain needs to be specified"
    if not rule:
        return "Error: Rule needs to be specified"

    wait_flag = "--wait" if _has_option("--wait", family) else ""
    # Refuse to append a duplicate: check() returns exactly True when the
    # rule already exists (any non-bool result means "not found").
    if check(table, chain, rule, family) is True:
        return False
    append_cmd = "{} {} -t {} -A {} {}".format(
        _iptables_cmd(family), wait_flag, table, chain, rule
    )
    err = __salt__["cmd.run_stderr"](append_cmd)
    # Success is indicated by empty stderr.
    return not err
def insert(table="filter", chain=None, position=None, rule=None, family="ipv4"):
    """
    Insert a rule into the specified table/chain, at the specified position.

    This function accepts a rule in a standard iptables command format,
    starting with the chain. Trying to force users to adapt to a new
    method of creating rules would be irritating at best, and we
    already have a parser that can handle it.

    If the position specified is a negative number, then the insert will be
    performed counting from the end of the list. For instance, a position
    of -1 will insert the rule as the second to last rule. To insert a rule
    in the last position, use the append function instead.

    CLI Examples:

    .. code-block:: bash

        salt '*' iptables.insert filter INPUT position=3 \\
            rule='-m state --state RELATED,ESTABLISHED -j ACCEPT'

        IPv6:
        salt '*' iptables.insert filter INPUT position=3 \\
            rule='-m state --state RELATED,ESTABLISHED -j ACCEPT' \\
            family=ipv6
    """
    if not chain:
        return "Error: Chain needs to be specified"
    if not position:
        return "Error: Position needs to be specified or use append (-A)"
    if not rule:
        return "Error: Rule needs to be specified"

    if position < 0:
        # Translate the negative offset into iptables' 1-based indexing,
        # counting back from the end of the chain's current rule list.
        current = get_rules(family=family)
        chain_size = len(current[table][chain]["rules"])
        position = chain_size + position + 1
        if position == 0:
            position = 1

    wait_flag = "--wait" if _has_option("--wait", family) else ""
    # check() returns exactly True when the rule is already present.
    if check(table, chain, rule, family) is True:
        return False
    insert_cmd = "{} {} -t {} -I {} {} {}".format(
        _iptables_cmd(family), wait_flag, table, chain, position, rule
    )
    return __salt__["cmd.run_stderr"](insert_cmd)
def delete(table, chain=None, position=None, rule=None, family="ipv4"):
    """
    Delete a rule from the specified table/chain, specifying either the rule
    in its entirety, or the rule's position in the chain.

    This function accepts a rule in a standard iptables command format,
    starting with the chain. Trying to force users to adapt to a new
    method of creating rules would be irritating at best, and we
    already have a parser that can handle it.

    CLI Examples:

    .. code-block:: bash

        salt '*' iptables.delete filter INPUT position=3
        salt '*' iptables.delete filter INPUT \\
            rule='-m state --state RELATED,ESTABLISHED -j ACCEPT'

        IPv6:
        salt '*' iptables.delete filter INPUT position=3 family=ipv6
        salt '*' iptables.delete filter INPUT \\
            rule='-m state --state RELATED,ESTABLISHED -j ACCEPT' \\
            family=ipv6
    """
    if position and rule:
        return "Error: Only specify a position or a rule, not both"

    # iptables -D accepts either a full rule spec or a 1-based position,
    # so the two are interchangeable on the command line.
    target = position if position else rule
    wait_flag = "--wait" if _has_option("--wait", family) else ""
    delete_cmd = "{} {} -t {} -D {} {}".format(
        _iptables_cmd(family), wait_flag, table, chain, target
    )
    return __salt__["cmd.run_stderr"](delete_cmd)
def flush(table="filter", chain="", family="ipv4"):
    """
    Flush the chain in the specified table, flush all chains in the specified
    table if not specified chain.

    CLI Example:

    .. code-block:: bash

        salt '*' iptables.flush filter INPUT

        IPv6:
        salt '*' iptables.flush filter INPUT family=ipv6
    """
    # With an empty chain argument, `-F` flushes every chain in the table.
    wait_flag = "--wait" if _has_option("--wait", family) else ""
    flush_cmd = "{} {} -t {} -F {}".format(
        _iptables_cmd(family), wait_flag, table, chain
    )
    return __salt__["cmd.run_stderr"](flush_cmd)
def _parse_conf(conf_file=None, in_mem=False, family="ipv4"):
    """
    Parse an iptables-save-formatted ruleset into a nested dict:
    ``{table: {chain: {"policy", "packet count", "byte count",
    "rules", "rules_comment"}}}``.

    If a file is not passed in, and the correct one for this OS is not
    detected, return False

    .. note::
        Despite the statement above, when neither a conf file nor
        ``in_mem`` is available this actually raises ``SaltException``.
    """
    # Prefer the platform's default conf file when no explicit source given.
    if _conf() and not conf_file and not in_mem:
        conf_file = _conf(family)
    rules = ""
    if conf_file:
        with salt.utils.files.fopen(conf_file, "r") as ifile:
            rules = ifile.read()
    elif in_mem:
        # Read the live ruleset via `iptables-save` / `ip6tables-save`.
        cmd = "{}-save".format(_iptables_cmd(family))
        rules = __salt__["cmd.run_stdout"](cmd)
    else:
        raise SaltException("A file was not found to parse")
    ret = {}
    table = ""
    parser = _parser()
    for line in rules.splitlines():
        line = salt.utils.stringutils.to_unicode(line)
        if line.startswith("*"):
            # "*filter" style line starts a new table section.
            table = line.replace("*", "")
            ret[table] = {}
        elif line.startswith(":"):
            # ":INPUT ACCEPT [10:1024]" — chain declaration with policy
            # and packet/byte counters.
            comps = line.split()
            chain = comps[0].replace(":", "")
            ret[table][chain] = {}
            ret[table][chain]["policy"] = comps[1]
            counters = comps[2].replace("[", "").replace("]", "")
            (pcount, bcount) = counters.split(":")
            ret[table][chain]["packet count"] = pcount
            ret[table][chain]["byte count"] = bcount
            ret[table][chain]["rules"] = []
            ret[table][chain]["rules_comment"] = {}
        elif line.startswith("-A"):
            # Rule line. Normalize the token list so that every option flag
            # is followed by exactly one value token before handing it to
            # the argparse-based parser. The ordering of these mutations is
            # load-bearing — do not reorder.
            args = salt.utils.args.shlex_split(line)
            index = 0
            while index + 1 < len(args):
                # A "!" immediately before a flag negates it; move the "!"
                # after the flag so it ends up in the flag's value slot.
                swap = args[index] == "!" and args[index + 1].startswith("-")
                if swap:
                    args[index], args[index + 1] = args[index + 1], args[index]
                if args[index].startswith("-"):
                    index += 1
                    if args[index].startswith("-") or (args[index] == "!" and not swap):
                        # Flag with no value (next token is another flag, or
                        # a standalone "!"): insert an empty placeholder.
                        args.insert(index, "")
                    else:
                        # Multi-token value (e.g. comment text): merge the
                        # following non-flag tokens into one value string.
                        while (
                            index + 1 < len(args)
                            and args[index + 1] != "!"
                            and not args[index + 1].startswith("-")
                        ):
                            args[index] += " {}".format(args.pop(index + 1))
                index += 1
            # A trailing flag also needs an empty value placeholder.
            if args[-1].startswith("-"):
                args.append("")
            parsed_args = []
            # parse_known_args tolerates options the parser doesn't know.
            opts, _ = parser.parse_known_args(args)
            parsed_args = vars(opts)
            ret_args = {}
            # "append" holds the chain name (from -A); all other non-empty
            # options become the rule's attribute dict.
            chain = parsed_args["append"]
            for arg in parsed_args:
                if parsed_args[arg] and arg != "append":
                    ret_args[arg] = parsed_args[arg]
            if parsed_args["comment"] is not None:
                # Also index commented rules by their (unquoted) comment.
                comment = parsed_args["comment"][0].strip('"')
                ret[table][chain[0]]["rules_comment"][comment] = ret_args
            ret[table][chain[0]]["rules"].append(ret_args)
    return ret
def _parser():
"""
This function attempts to list all the options documented in the
iptables(8) and iptables-extensions(8) man pages. They will not all be
used by all parts of the module; use them intelligently and appropriately.
"""
add_arg = None
if sys.version.startswith("2.6"):
import optparse
parser = optparse.OptionParser()
add_arg = parser.add_option
else:
import argparse # pylint: disable=minimum-python-version
parser = argparse.ArgumentParser()
add_arg = parser.add_argument
# COMMANDS
add_arg("-A", "--append", dest="append", action="append")
add_arg("-D", "--delete", dest="delete", action="append")
add_arg("-I", "--insert", dest="insert", action="append")
add_arg("-R", "--replace", dest="replace", action="append")
add_arg("-L", "--list", dest="list", action="append")
add_arg("-F", "--flush", dest="flush", action="append")
add_arg("-Z", "--zero", dest="zero", action="append")
add_arg("-N", "--new-chain", dest="new-chain", action="append")
add_arg("-X", "--delete-chain", dest="delete-chain", action="append")
add_arg("-P", "--policy", dest="policy", action="append")
add_arg("-E", "--rename-chain", dest="rename-chain", action="append")
# PARAMETERS
add_arg("-p", "--protocol", dest="protocol", action="append")
add_arg("-s", "--source", dest="source", action="append")
add_arg("-d", "--destination", dest="destination", action="append")
add_arg("-j", "--jump", dest="jump", action="append")
add_arg("-g", "--goto", dest="goto", action="append")
add_arg("-i", "--in-interface", dest="in-interface", action="append")
add_arg("-o", "--out-interface", dest="out-interface", action="append")
add_arg("-f", "--fragment", dest="fragment", action="append")
add_arg("-c", "--set-counters", dest="set-counters", action="append")
# MATCH EXTENSIONS
add_arg("-m", "--match", dest="match", action="append")
## addrtype
add_arg("--src-type", dest="src-type", action="append")
add_arg("--dst-type", dest="dst-type", action="append")
add_arg("--limit-iface-in", dest="limit-iface-in", action="append")
add_arg("--limit-iface-out", dest="limit-iface-out", action="append")
## ah
add_arg("--ahspi", dest="ahspi", action="append")
add_arg("--ahlen", dest="ahlen", action="append")
add_arg("--ahres", dest="ahres", action="append")
## bpf
add_arg("--bytecode", dest="bytecode", action="append")
## cgroup
add_arg("--cgroup", dest="cgroup", action="append")
## cluster
add_arg("--cluster-total-nodes", dest="cluster-total-nodes", action="append")
add_arg("--cluster-local-node", dest="cluster-local-node", action="append")
add_arg("--cluster-local-nodemask", dest="cluster-local-nodemask", action="append")
add_arg("--cluster-hash-seed", dest="cluster-hash-seed", action="append")
add_arg("--h-length", dest="h-length", action="append")
add_arg("--mangle-mac-s", dest="mangle-mac-s", action="append")
add_arg("--mangle-mac-d", dest="mangle-mac-d", action="append")
## comment
add_arg("--comment", dest="comment", action="append")
## connbytes
add_arg("--connbytes", dest="connbytes", action="append")
add_arg("--connbytes-dir", dest="connbytes-dir", action="append")
add_arg("--connbytes-mode", dest="connbytes-mode", action="append")
## connlabel
add_arg("--label", dest="label", action="append")
## connlimit
add_arg("--connlimit-upto", dest="connlimit-upto", action="append")
add_arg("--connlimit-above", dest="connlimit-above", action="append")
add_arg("--connlimit-mask", dest="connlimit-mask", action="append")
add_arg("--connlimit-saddr", dest="connlimit-saddr", action="append")
add_arg("--connlimit-daddr", dest="connlimit-daddr", action="append")
## connmark
add_arg("--mark", dest="mark", action="append")
## conntrack
add_arg("--ctstate", dest="ctstate", action="append")
add_arg("--ctproto", dest="ctproto", action="append")
add_arg("--ctorigsrc", dest="ctorigsrc", action="append")
add_arg("--ctorigdst", dest="ctorigdst", action="append")
add_arg("--ctreplsrc", dest="ctreplsrc", action="append")
add_arg("--ctrepldst", dest="ctrepldst", action="append")
add_arg("--ctorigsrcport", dest="ctorigsrcport", action="append")
add_arg("--ctorigdstport", dest="ctorigdstport", action="append")
add_arg("--ctreplsrcport", dest="ctreplsrcport", action="append")
add_arg("--ctrepldstport", dest="ctrepldstport", action="append")
add_arg("--ctstatus", dest="ctstatus", action="append")
add_arg("--ctexpire", dest="ctexpire", action="append")
add_arg("--ctdir", dest="ctdir", action="append")
## cpu
add_arg("--cpu", dest="cpu", action="append")
## dccp
add_arg("--sport", "--source-port", dest="source_port", action="append")
add_arg("--dport", "--destination-port", dest="destination_port", action="append")
add_arg("--dccp-types", dest="dccp-types", action="append")
add_arg("--dccp-option", dest="dccp-option", action="append")
## devgroup
add_arg("--src-group", dest="src-group", action="append")
add_arg("--dst-group", dest="dst-group", action="append")
## dscp
add_arg("--dscp", dest="dscp", action="append")
add_arg("--dscp-class", dest="dscp-class", action="append")
## dst
add_arg("--dst-len", dest="dst-len", action="append")
add_arg("--dst-opts", dest="dst-opts", action="append")
## ecn
add_arg("--ecn-tcp-cwr", dest="ecn-tcp-cwr", action="append")
add_arg("--ecn-tcp-ece", dest="ecn-tcp-ece", action="append")
add_arg("--ecn-ip-ect", dest="ecn-ip-ect", action="append")
## esp
add_arg("--espspi", dest="espspi", action="append")
## frag
add_arg("--fragid", dest="fragid", action="append")
add_arg("--fraglen", dest="fraglen", action="append")
add_arg("--fragres", dest="fragres", action="append")
add_arg("--fragfirst", dest="fragfirst", action="append")
add_arg("--fragmore", dest="fragmore", action="append")
add_arg("--fraglast", dest="fraglast", action="append")
## hashlimit
add_arg("--hashlimit-upto", dest="hashlimit-upto", action="append")
add_arg("--hashlimit-above", dest="hashlimit-above", action="append")
add_arg("--hashlimit-burst", dest="hashlimit-burst", action="append")
add_arg("--hashlimit-mode", dest="hashlimit-mode", action="append")
add_arg("--hashlimit-srcmask", dest="hashlimit-srcmask", action="append")
add_arg("--hashlimit-dstmask", dest="hashlimit-dstmask", action="append")
add_arg("--hashlimit-name", dest="hashlimit-name", action="append")
add_arg("--hashlimit-htable-size", dest="hashlimit-htable-size", action="append")
add_arg("--hashlimit-htable-max", dest="hashlimit-htable-max", action="append")
add_arg(
"--hashlimit-htable-expire", dest="hashlimit-htable-expire", action="append"
)
add_arg(
"--hashlimit-htable-gcinterval",
dest="hashlimit-htable-gcinterval",
action="append",
)
## hbh
add_arg("--hbh-len", dest="hbh-len", action="append")
add_arg("--hbh-opts", dest="hbh-opts", action="append")
## helper
add_arg("--helper", dest="helper", action="append")
## hl
add_arg("--hl-eq", dest="hl-eq", action="append")
add_arg("--hl-lt", dest="hl-lt", action="append")
add_arg("--hl-gt", dest="hl-gt", action="append")
## icmp
add_arg("--icmp-type", dest="icmp-type", action="append")
## icmp6
add_arg("--icmpv6-type", dest="icmpv6-type", action="append")
## iprange
add_arg("--src-range", dest="src-range", action="append")
add_arg("--dst-range", dest="dst-range", action="append")
## ipv6header
add_arg("--soft", dest="soft", action="append")
add_arg("--header", dest="header", action="append")
## ipvs
add_arg("--ipvs", dest="ipvs", action="append")
add_arg("--vproto", dest="vproto", action="append")
add_arg("--vaddr", dest="vaddr", action="append")
add_arg("--vport", dest="vport", action="append")
add_arg("--vdir", dest="vdir", action="append")
add_arg("--vmethod", dest="vmethod", action="append")
add_arg("--vportctl", dest="vportctl", action="append")
## length
add_arg("--length", dest="length", action="append")
## limit
add_arg("--limit", dest="limit", action="append")
add_arg("--limit-burst", dest="limit-burst", action="append")
## mac
add_arg("--mac-source", dest="mac-source", action="append")
## mh
add_arg("--mh-type", dest="mh-type", action="append")
## multiport
add_arg("--sports", "--source-ports", dest="source-ports", action="append")
add_arg(
"--dports", "--destination-ports", dest="destination-ports", action="append"
)
add_arg("--ports", dest="ports", action="append")
## nfacct
add_arg("--nfacct-name", dest="nfacct-name", action="append")
## osf
add_arg("--genre", dest="genre", action="append")
add_arg("--ttl", dest="ttl", action="append")
add_arg("--log", dest="log", action="append")
## owner
add_arg("--uid-owner", dest="uid-owner", action="append")
add_arg("--gid-owner", dest="gid-owner", action="append")
add_arg("--socket-exists", dest="socket-exists", action="append")
## physdev
add_arg("--physdev-in", dest="physdev-in", action="append")
add_arg("--physdev-out", dest="physdev-out", action="append")
add_arg("--physdev-is-in", dest="physdev-is-in", action="append")
add_arg("--physdev-is-out", dest="physdev-is-out", action="append")
add_arg("--physdev-is-bridged", dest="physdev-is-bridged", action="append")
## pkttype
add_arg("--pkt-type", dest="pkt-type", action="append")
## policy
add_arg("--dir", dest="dir", action="append")
add_arg("--pol", dest="pol", action="append")
add_arg("--strict", dest="strict", action="append")
add_arg("--reqid", dest="reqid", action="append")
add_arg("--spi", dest="spi", action="append")
add_arg("--proto", dest="proto", action="append")
add_arg("--mode", dest="mode", action="append")
add_arg("--tunnel-src", dest="tunnel-src", action="append")
add_arg("--tunnel-dst", dest="tunnel-dst", action="append")
add_arg("--next", dest="next", action="append")
## quota
add_arg("--quota", dest="quota", action="append")
## rateest
add_arg("--rateest", dest="rateest", action="append")
add_arg("--rateest1", dest="rateest1", action="append")
add_arg("--rateest2", dest="rateest2", action="append")
add_arg("--rateest-delta", dest="rateest-delta", action="append")
add_arg("--rateest-bps", dest="rateest-bps", action="append")
add_arg("--rateest-bps1", dest="rateest-bps1", action="append")
add_arg("--rateest-bps2", dest="rateest-bps2", action="append")
add_arg("--rateest-pps", dest="rateest-pps", action="append")
add_arg("--rateest-pps1", dest="rateest-pps1", action="append")
add_arg("--rateest-pps2", dest="rateest-pps2", action="append")
add_arg("--rateest-lt", dest="rateest-lt", action="append")
add_arg("--rateest-gt", dest="rateest-gt", action="append")
add_arg("--rateest-eq", dest="rateest-eq", action="append")
add_arg("--rateest-name", dest="rateest-name", action="append")
add_arg("--rateest-interval", dest="rateest-interval", action="append")
add_arg("--rateest-ewma", dest="rateest-ewma", action="append")
## realm
add_arg("--realm", dest="realm", action="append")
## recent
add_arg("--name", dest="name", action="append")
add_arg("--set", dest="set", action="append")
add_arg("--rsource", dest="rsource", action="append")
add_arg("--rdest", dest="rdest", action="append")
add_arg("--mask", dest="mask", action="append")
add_arg("--rcheck", dest="rcheck", action="append")
add_arg("--update", dest="update", action="append")
add_arg("--remove", dest="remove", action="append")
add_arg("--seconds", dest="seconds", action="append")
add_arg("--reap", dest="reap", action="append")
add_arg("--hitcount", dest="hitcount", action="append")
add_arg("--rttl", dest="rttl", action="append")
## rpfilter
add_arg("--loose", dest="loose", action="append")
add_arg("--validmark", dest="validmark", action="append")
add_arg("--accept-local", dest="accept-local", action="append")
add_arg("--invert", dest="invert", action="append")
## rt
add_arg("--rt-type", dest="rt-type", action="append")
add_arg("--rt-segsleft", dest="rt-segsleft", action="append")
add_arg("--rt-len", dest="rt-len", action="append")
add_arg("--rt-0-res", dest="rt-0-res", action="append")
add_arg("--rt-0-addrs", dest="rt-0-addrs", action="append")
add_arg("--rt-0-not-strict", dest="rt-0-not-strict", action="append")
## sctp
add_arg("--chunk-types", dest="chunk-types", action="append")
## set
add_arg("--match-set", dest="match-set", action="append")
add_arg("--return-nomatch", dest="return-nomatch", action="append")
add_arg("--update-counters", dest="update-counters", action="append")
add_arg("--update-subcounters", dest="update-subcounters", action="append")
add_arg("--packets-eq", dest="packets-eq", action="append")
add_arg("--packets-lt", dest="packets-lt", action="append")
add_arg("--packets-gt", dest="packets-gt", action="append")
add_arg("--bytes-eq", dest="bytes-eq", action="append")
add_arg("--bytes-lt", dest="bytes-lt", action="append")
add_arg("--bytes-gt", dest="bytes-gt", action="append")
## socket
add_arg("--transparent", dest="transparent", action="append")
add_arg("--nowildcard", dest="nowildcard", action="append")
## state
add_arg("--state", dest="state", action="append")
## statistic
add_arg("--probability", dest="probability", action="append")
add_arg("--every", dest="every", action="append")
add_arg("--packet", dest="packet", action="append")
## string
add_arg("--algo", dest="algo", action="append")
add_arg("--from", dest="from", action="append")
add_arg("--to", dest="to", action="append")
add_arg("--string", dest="string", action="append")
add_arg("--hex-string", dest="hex-string", action="append")
## tcp
add_arg("--tcp-flags", dest="tcp-flags", action="append")
add_arg("--syn", dest="syn", action="append")
add_arg("--tcp-option", dest="tcp-option", action="append")
## tcpmss
add_arg("--mss", dest="mss", action="append")
## time
add_arg("--datestart", dest="datestart", action="append")
add_arg("--datestop", dest="datestop", action="append")
add_arg("--timestart", dest="timestart", action="append")
add_arg("--timestop", dest="timestop", action="append")
add_arg("--monthdays", dest="monthdays", action="append")
add_arg("--weekdays", dest="weekdays", action="append")
add_arg("--contiguous", dest="contiguous", action="append")
add_arg("--kerneltz", dest="kerneltz", action="append")
add_arg("--utc", dest="utc", action="append")
add_arg("--localtz", dest="localtz", action="append")
## tos
add_arg("--tos", dest="tos", action="append")
## ttl
add_arg("--ttl-eq", dest="ttl-eq", action="append")
add_arg("--ttl-gt", dest="ttl-gt", action="append")
add_arg("--ttl-lt", dest="ttl-lt", action="append")
## u32
add_arg("--u32", dest="u32", action="append")
# Xtables-addons matches
## condition
add_arg("--condition", dest="condition", action="append")
## dhcpmac
add_arg("--mac", dest="mac", action="append")
## fuzzy
add_arg("--lower-limit", dest="lower-limit", action="append")
add_arg("--upper-limit", dest="upper-limit", action="append")
## geoip
add_arg("--src-cc", "--source-country", dest="source-country", action="append")
add_arg(
"--dst-cc", "--destination-country", dest="destination-country", action="append"
)
## gradm
add_arg("--enabled", dest="enabled", action="append")
add_arg("--disabled", dest="disabled", action="append")
## iface
add_arg("--iface", dest="iface", action="append")
add_arg("--dev-in", dest="dev-in", action="append")
add_arg("--dev-out", dest="dev-out", action="append")
add_arg("--up", dest="up", action="append")
add_arg("--down", dest="down", action="append")
add_arg("--broadcast", dest="broadcast", action="append")
add_arg("--loopback", dest="loopback", action="append")
add_arg("--pointtopoint", dest="pointtopoint", action="append")
add_arg("--running", dest="running", action="append")
add_arg("--noarp", dest="noarp", action="append")
add_arg("--arp", dest="arp", action="append")
add_arg("--promisc", dest="promisc", action="append")
add_arg("--multicast", dest="multicast", action="append")
add_arg("--dynamic", dest="dynamic", action="append")
add_arg("--lower-up", dest="lower-up", action="append")
add_arg("--dormant", dest="dormant", action="append")
## ipp2p
add_arg("--edk", dest="edk", action="append")
add_arg("--kazaa", dest="kazaa", action="append")
add_arg("--gnu", dest="gnu", action="append")
add_arg("--dc", dest="dc", action="append")
add_arg("--bit", dest="bit", action="append")
add_arg("--apple", dest="apple", action="append")
add_arg("--soul", dest="soul", action="append")
add_arg("--winmx", dest="winmx", action="append")
add_arg("--ares", dest="ares", action="append")
add_arg("--debug", dest="debug", action="append")
## ipv4options
add_arg("--flags", dest="flags", action="append")
add_arg("--any", dest="any", action="append")
## length2
add_arg("--layer3", dest="layer3", action="append")
add_arg("--layer4", dest="layer4", action="append")
add_arg("--layer5", dest="layer5", action="append")
## lscan
add_arg("--stealth", dest="stealth", action="append")
add_arg("--synscan", dest="synscan", action="append")
add_arg("--cnscan", dest="cnscan", action="append")
add_arg("--grscan", dest="grscan", action="append")
## psd
add_arg("--psd-weight-threshold", dest="psd-weight-threshold", action="append")
add_arg("--psd-delay-threshold", dest="psd-delay-threshold", action="append")
add_arg("--psd-lo-ports-weight", dest="psd-lo-ports-weight", action="append")
add_arg("--psd-hi-ports-weight", dest="psd-hi-ports-weight", action="append")
## quota2
add_arg("--grow", dest="grow", action="append")
add_arg("--no-change", dest="no-change", action="append")
add_arg("--packets", dest="packets", action="append")
## pknock
add_arg("--knockports", dest="knockports", action="append")
add_arg("--time", dest="time", action="append")
add_arg("--autoclose", dest="autoclose", action="append")
add_arg("--checkip", dest="checkip", action="append")
# TARGET EXTENSIONS
## AUDIT
add_arg("--type", dest="type", action="append")
## CHECKSUM
add_arg("--checksum-fill", dest="checksum-fill", action="append")
## CLASSIFY
add_arg("--set-class", dest="set-class", action="append")
## CLUSTERIP
add_arg("--new", dest="new", action="append")
add_arg("--hashmode", dest="hashmode", action="append")
add_arg("--clustermac", dest="clustermac", action="append")
add_arg("--total-nodes", dest="total-nodes", action="append")
add_arg("--local-node", dest="local-node", action="append")
add_arg("--hash-init", dest="hash-init", action="append")
## CONNMARK
add_arg("--set-xmark", dest="set-xmark", action="append")
add_arg("--save-mark", dest="save-mark", action="append")
add_arg("--restore-mark", dest="restore-mark", action="append")
add_arg("--and-mark", dest="and-mark", action="append")
add_arg("--or-mark", dest="or-mark", action="append")
add_arg("--xor-mark", dest="xor-mark", action="append")
add_arg("--set-mark", dest="set-mark", action="append")
add_arg("--nfmask", dest="nfmask", action="append")
add_arg("--ctmask", dest="ctmask", action="append")
## CONNSECMARK
add_arg("--save", dest="save", action="append")
add_arg("--restore", dest="restore", action="append")
## CT
add_arg("--notrack", dest="notrack", action="append")
add_arg("--ctevents", dest="ctevents", action="append")
add_arg("--expevents", dest="expevents", action="append")
add_arg("--zone", dest="zone", action="append")
add_arg("--timeout", dest="timeout", action="append")
## DNAT
add_arg("--to-destination", dest="to-destination", action="append")
add_arg("--random", dest="random", action="append")
add_arg("--persistent", dest="persistent", action="append")
## DNPT
add_arg("--src-pfx", dest="src-pfx", action="append")
add_arg("--dst-pfx", dest="dst-pfx", action="append")
## DSCP
add_arg("--set-dscp", dest="set-dscp", action="append")
add_arg("--set-dscp-class", dest="set-dscp-class", action="append")
## ECN
add_arg("--ecn-tcp-remove", dest="ecn-tcp-remove", action="append")
## HL
add_arg("--hl-set", dest="hl-set", action="append")
add_arg("--hl-dec", dest="hl-dec", action="append")
add_arg("--hl-inc", dest="hl-inc", action="append")
## HMARK
add_arg("--hmark-tuple", dest="hmark-tuple", action="append")
add_arg("--hmark-mod", dest="hmark-mod", action="append")
add_arg("--hmark-offset", dest="hmark-offset", action="append")
add_arg("--hmark-src-prefix", dest="hmark-src-prefix", action="append")
add_arg("--hmark-dst-prefix", dest="hmark-dst-prefix", action="append")
add_arg("--hmark-sport-mask", dest="hmark-sport-mask", action="append")
add_arg("--hmark-dport-mask", dest="hmark-dport-mask", action="append")
add_arg("--hmark-spi-mask", dest="hmark-spi-mask", action="append")
add_arg("--hmark-proto-mask", dest="hmark-proto-mask", action="append")
add_arg("--hmark-rnd", dest="hmark-rnd", action="append")
## LED
add_arg("--led-trigger-id", dest="led-trigger-id", action="append")
add_arg("--led-delay", dest="led-delay", action="append")
add_arg("--led-always-blink", dest="led-always-blink", action="append")
## LOG
add_arg("--log-level", dest="log-level", action="append")
add_arg("--log-prefix", dest="log-prefix", action="append")
add_arg("--log-tcp-sequence", dest="log-tcp-sequence", action="append")
add_arg("--log-tcp-options", dest="log-tcp-options", action="append")
add_arg("--log-ip-options", dest="log-ip-options", action="append")
add_arg("--log-uid", dest="log-uid", action="append")
## MASQUERADE
add_arg("--to-ports", dest="to-ports", action="append")
## NFLOG
add_arg("--nflog-group", dest="nflog-group", action="append")
add_arg("--nflog-prefix", dest="nflog-prefix", action="append")
add_arg("--nflog-range", dest="nflog-range", action="append")
add_arg("--nflog-threshold", dest="nflog-threshold", action="append")
## NFQUEUE
add_arg("--queue-num", dest="queue-num", action="append")
add_arg("--queue-balance", dest="queue-balance", action="append")
add_arg("--queue-bypass", dest="queue-bypass", action="append")
add_arg("--queue-cpu-fanout", dest="queue-cpu-fanout", action="append")
## RATEEST
add_arg("--rateest-ewmalog", dest="rateest-ewmalog", action="append")
## REJECT
add_arg("--reject-with", dest="reject-with", action="append")
## SAME
add_arg("--nodst", dest="nodst", action="append")
## SECMARK
add_arg("--selctx", dest="selctx", action="append")
## SET
add_arg("--add-set", dest="add-set", action="append")
add_arg("--del-set", dest="del-set", action="append")
add_arg("--exist", dest="exist", action="append")
## SNAT
add_arg("--to-source", dest="to-source", action="append")
## TCPMSS
add_arg("--set-mss", dest="set-mss", action="append")
add_arg("--clamp-mss-to-pmtu", dest="clamp-mss-to-pmtu", action="append")
## TCPOPTSTRIP
add_arg("--strip-options", dest="strip-options", action="append")
## TEE
add_arg("--gateway", dest="gateway", action="append")
## TOS
add_arg("--set-tos", dest="set-tos", action="append")
add_arg("--and-tos", dest="and-tos", action="append")
add_arg("--or-tos", dest="or-tos", action="append")
add_arg("--xor-tos", dest="xor-tos", action="append")
## TPROXY
add_arg("--on-port", dest="on-port", action="append")
add_arg("--on-ip", dest="on-ip", action="append")
add_arg("--tproxy-mark", dest="tproxy-mark", action="append")
## TTL
add_arg("--ttl-set", dest="ttl-set", action="append")
add_arg("--ttl-dec", dest="ttl-dec", action="append")
add_arg("--ttl-inc", dest="ttl-inc", action="append")
## ULOG
add_arg("--ulog-nlgroup", dest="ulog-nlgroup", action="append")
add_arg("--ulog-prefix", dest="ulog-prefix", action="append")
add_arg("--ulog-cprange", dest="ulog-cprange", action="append")
add_arg("--ulog-qthreshold", dest="ulog-qthreshold", action="append")
# Xtables-addons targets
## ACCOUNT
add_arg("--addr", dest="addr", action="append")
add_arg("--tname", dest="tname", action="append")
## CHAOS
add_arg("--delude", dest="delude", action="append")
add_arg("--tarpit", dest="tarpit", action="append")
## DHCPMAC
add_arg("--set-mac", dest="set-mac", action="append")
## DNETMAP
add_arg("--prefix", dest="prefix", action="append")
add_arg("--reuse", dest="reuse", action="append")
add_arg("--static", dest="static", action="append")
## IPMARK
add_arg("--and-mask", dest="and-mask", action="append")
add_arg("--or-mask", dest="or-mask", action="append")
add_arg("--shift", dest="shift", action="append")
## TARPIT
add_arg("--honeypot", dest="honeypot", action="append")
add_arg("--reset", dest="reset", action="append")
return parser
| |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for estimating the mutual information with kNN algorithm."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from tensorflow_data_validation.utils import mutual_information_util
# Short aliases for the estimators under test.
_MI = mutual_information_util.mutual_information
_AMI = mutual_information_util.adjusted_mutual_information
class RanklabMutualInformationTest(parameterized.TestCase):
  """Tests for the kNN-based mutual information estimators.

  Most tests compare the estimate against a closed-form theoretical value
  (Gaussian case, discrete entropy) or against an invariance property
  (adding an independent feature, filtering, weighting). All randomness is
  seeded so the estimates are deterministic.
  """

  def _MakeCorrelatedFeatures(self, means, rho):
    # Make n correlated Gaussian random features, and also compute the
    # theoretical mutual information between the first n-1 features and the last
    # feature.
    np.random.seed(30)
    means = np.array(means)
    n = means.size
    cov = np.ones((n, n)) * rho
    cov[range(n), range(n)] = 1
    dat = np.random.multivariate_normal(means, cov, 50000)
    # Theoretical value of the mutual information.
    expected_mi = -0.5 * (
        np.log2(np.linalg.det(cov)) - np.log2(np.linalg.det(cov[:-1, :-1])))
    return [dat[:, i] for i in range(n)], expected_mi

  def testOrdinalIndependentFeatures(self):
    # Independent features should have (near) zero mutual information.
    np.random.seed(29)
    r0 = np.random.randn(50000)
    r1 = np.random.randn(50000)
    for method in ['smaller_data', 'larger_data']:
      result = _MI([r0], [r1], [False], [False],
                   estimate_method=method,
                   seed=21)
      self.assertAlmostEqual(result, 0, places=2)

  def testEntropy(self):
    # Estimate the entropy by computing the mutual information with itself.
    np.random.seed(23)
    r = np.random.randint(0, 8, 50000)  # 8 categories.
    for method in ['smaller_data', 'larger_data']:
      # I(X; X) = H(X) = log2(8) = 3 bits for a uniform 8-way categorical.
      result = _MI([r], [r], [True], [True], estimate_method=method, seed=21)
      self.assertAlmostEqual(result, 3, delta=1e-2)
      # Treat it as a ordinal variable.
      result = _MI([r], [r], [False], [False], estimate_method=method, seed=21)
      self.assertAlmostEqual(result, 3, delta=1e-2)

  def testCorrelatedGaussians(self):
    # The mutual information between correlated Gaussian random variables can be
    # theoretically computed, which provides a nice test for the code.
    rho = 0.4
    [f0, f1], expected = self._MakeCorrelatedFeatures([10, 20], rho)
    result = _MI([f0], [f1], [False], [False],
                 estimate_method='smaller_data',
                 seed=21)
    self.assertAlmostEqual(result, expected, places=2)
    result = _MI([f0], [f1], [False], [False],
                 estimate_method='larger_data',
                 seed=21)
    self.assertAlmostEqual(result, expected, places=2)
    # Higher dimension.
    rho = 0.9  # fairly strongly dependent features
    [f0, f1, f2, f3], expected = self._MakeCorrelatedFeatures([1, 2, -3, 4],
                                                              rho)
    for method in ['smaller_data', 'larger_data']:
      result = _MI([f1, f2, f3], [f0], [False] * 3, [False],
                   estimate_method=method,
                   seed=21)
      self.assertAlmostEqual(result, expected, delta=2e-2)

  def testAddingIndependentFeature(self):
    # Adding an independent feature into the computation, does not alter the
    # mutual information.
    np.random.seed(23)
    r = np.random.randint(0, 8, 50000)
    s = np.random.randint(0, 3, 50000) + r
    w = np.random.randn(50000)
    for method in ['smaller_data', 'larger_data']:
      mi_rs = _MI([r], [s], [False], [False], estimate_method=method, seed=21)
      mi_rws = _MI([r, w], [s], [False] * 2, [False],
                   estimate_method=method,
                   seed=21)
      self.assertAlmostEqual(mi_rws, mi_rs, places=2)

  def testMissingValues(self):
    np.random.seed(23)
    # fb has missing values exactly where fa == 2, so the missingness itself
    # carries one bit of information about fa; fc carries the same bit.
    fz = np.array([1.] * 10000)
    fx = np.random.random(10000)
    fa = np.array([1] * 5000 + [2] * 5000, dtype=float)
    fb = np.array([2.3] * 5000 + [None] * 5000)
    fc = np.array([0.] * 5000 + [10.] * 5000)
    for method in ['smaller_data', 'larger_data']:
      # A constant feature carries no information.
      result = _MI([fz], [fa], [False], [False],
                   seed=23,
                   estimate_method=method)
      self.assertLess(abs(result), 1e-2)
      result = _MI([fc], [fa], [False], [False],
                   seed=23,
                   estimate_method=method)
      self.assertLess(abs(result - 1), 1e-2)
      result = _MI([fb], [fa], [False], [False],
                   seed=23,
                   estimate_method=method)
      self.assertLess(abs(result - 1), 1e-2)
      # Add an independent feature does not affect.
      result = _MI([fc, fx], [fa], [False] * 2, [False],
                   seed=23,
                   estimate_method=method)
      self.assertLess(abs(result - 1), 1e-2)
      result = _MI([fb, fx], [fa], [False] * 2, [False],
                   seed=23,
                   estimate_method=method)
      self.assertLess(abs(result - 1), 1e-2)

  def testFilterFeat(self):
    np.random.seed(3)
    fa = np.array(['cat0'] * 2000 + ['cat1'] * 2000 + ['cat2'] * 2000 +
                  ['cat3'] * 2000)  # 4 categories
    fg = np.array([1] * 2000 + [2] * 2000 + [3] * 2000 + [4] * 2000)
    filter_feat = np.array([1] * 6000 + [None] * 2000)
    filter_arr = np.array([True] * 6000 + [False] * 2000)
    for method in ['smaller_data', 'larger_data']:
      # With the last category filtered out, only 3 equiprobable categories
      # remain, so I(fg; fa) = log2(3).
      result = _MI([fg], [fa], [True], [True],
                   filter_feature=filter_arr,
                   seed=20,
                   estimate_method=method)
      self.assertAlmostEqual(result, np.log2(3), places=2)
      result = _MI([fg], [fa], [False], [True],
                   filter_feature=filter_arr,
                   seed=20,
                   estimate_method=method)
      self.assertAlmostEqual(result, np.log2(3), places=2)
      result = _MI([fg], [filter_feat], [False], [False],
                   seed=23,
                   estimate_method=method)
      self.assertAlmostEqual(result, (3 / 4) * (np.log2(4 / 3)) + 0.5, places=2)
      # Filtering away the rows where filter_feat is missing leaves it
      # constant, hence zero information.
      result = _MI([fg], [filter_feat], [False], [False],
                   filter_feature=filter_arr,
                   seed=23,
                   estimate_method=method)
      self.assertLess(abs(result), 1e-2)

  def testWeightFeat(self):
    np.random.seed(3)
    fa = np.array(['cat0'] * 2000 + ['cat1'] * 2000 + ['cat2'] * 2000 +
                  ['cat3'] * 2000)  # 4 categories
    fg = np.array([1] * 2000 + [2] * 2000 + [3] * 2000 + [4] * 2000)
    weight_feat = np.array([1] * 2000 + [0.5] * 2000 + [0.25] * 2000 +
                           [0] * 2000)
    for method in ['smaller_data', 'larger_data']:
      result = _MI([fg], [fa], [True], [True],
                   weight_feature=weight_feat,
                   seed=20,
                   estimate_method=method)
      self.assertAlmostEqual(result, 7 / 8, delta=1e-2)
      result = _MI([fg], [weight_feat], [False], [False],
                   weight_feature=weight_feat,
                   seed=23,
                   estimate_method=method)
      self.assertAlmostEqual(result, 7 / 8, delta=1e-2)

  def testAssertions(self):
    # Invalid argument shapes/types must be rejected.
    np.random.seed(23)
    fx = np.random.random(1000)
    fy = np.array([1.] * 1000)
    with self.assertRaises(AssertionError):
      _MI([], [fy], [False], [False])
    with self.assertRaises(AssertionError):
      _MI([fx], [], [False], [False])
    with self.assertRaises(AssertionError):
      # Feature list argument must be a list, not a bare array.
      _MI(fx, [fy], [False], [False])
    with self.assertRaises(AssertionError):
      # Length of the categorical-flags list must match the feature list.
      _MI([fx], [fy], [False] * 2, [False])
    with self.assertRaises(AssertionError):
      # output_each must be a bool, not a string.
      _MI([fx], [fy], [False], [False], output_each='False')

  def testOutputEachSanityCheck(self):
    np.random.seed(23)
    fx = np.random.randn(1000)
    fy = np.array([1.] * 1000)
    fz = np.array([True] * 700 + [False] * 300)
    for method in ['smaller_data', 'larger_data']:
      result, each_mi = _MI([fx], [fy], [False], [False],
                            seed=3,
                            output_each=True,
                            estimate_method=method)
      self.assertLess(abs(result), 1e-2)
      self.assertLen(each_mi, 1000)
      self.assertLess(max(0, np.mean(each_mi)), 1e-2)
      # With a filter, per-example values are only produced for kept rows.
      result, each_mi = _MI([fx], [fy], [False], [False],
                            filter_feature=fz,
                            seed=4,
                            output_each=True,
                            estimate_method=method)
      self.assertLess(abs(result), 1e-2)
      self.assertLen(each_mi, 700)
      self.assertLess(max(0, np.mean(each_mi)), 1e-2)

  def testOutputEach(self):
    np.random.seed(97)
    n = 10000
    fx = np.random.randint(0, 8, n)
    for method in ['smaller_data', 'larger_data']:
      for categorical0, categorical1 in [(True, True), (False, True),
                                         (False, False)]:
        # Test categorical vs categorical, ordinal vs categorical, ordinal
        # vs ordinal.
        result, each_mi = _MI([fx], [fx], [categorical0], [categorical1],
                              output_each=True,
                              estimate_method=method,
                              seed=5)
        self.assertAlmostEqual(result, 3, places=1)
        self.assertLen(each_mi, n)
        self.assertAlmostEqual(np.mean(each_mi), 3, places=1)
        # Each of the 8 equiprobable categories contributes 3/8 of the total.
        self.assertAlmostEqual(
            np.sum(each_mi[fx == 0]) / n, 3. / 8, places=None, delta=1e-2)
    for method in ['smaller_data', 'larger_data']:
      for categorical0, categorical1, categorical2 in [(False, False, True),
                                                       (False, True, True)]:
        result, each_mi = _MI([fx, fx], [fx], [categorical0, categorical1],
                              [categorical2],
                              output_each=True,
                              estimate_method=method,
                              seed=9)
        self.assertAlmostEqual(result, 3, places=2)
        self.assertLen(each_mi, n)
        self.assertAlmostEqual(np.mean(each_mi), 3, places=2)
        self.assertAlmostEqual(
            np.sum(each_mi[fx == 0]) / n, 3. / 8, places=None, delta=1e-2)

  def testCategorical(self):
    np.random.seed(3)
    a = np.array([b'cat0'] * 2000 + [b'cat1'] * 2000 + [b'cat2'] * 2000 +
                 [b'\xc5\x8cmura'] * 2000)  # 4 categories
    b = np.random.randn(a.size)
    c = np.arange(0.1, 100, 0.001)[:a.size] + 2 * b
    d = (
        np.random.normal(0.5, 1.0, a.size) +
        np.random.normal(-0.5, 1.0, a.size) + np.random.normal(0., 0.3, a.size))
    e = np.arange(0.1, 100, 0.001)[:a.size]
    # Build some features that repeat N times the same value sequence.
    g = np.array([i // (a.size // 8) for i in range(a.size)])
    h = np.array([b'cat%d' % (i // (a.size // 16)) for i in range(a.size)])
    for method in ['smaller_data', 'larger_data']:
      result = _MI([b], [a], [False], [True],
                   k=6,
                   estimate_method=method,
                   seed=20)
      self.assertLess(abs(result), 2e-2)
      result = _MI([c], [a], [False], [True],
                   k=6,
                   estimate_method=method,
                   seed=20)
      # NOTE(review): delta=1e+2 makes this assertion nearly vacuous (any
      # result within 100 of 0.565 passes) — confirm whether 1e-2 was meant.
      self.assertAlmostEqual(result, 0.565, delta=1e+2)
      result = _MI([d], [a], [False], [True],
                   k=6,
                   estimate_method=method,
                   seed=20)
      self.assertLess(abs(result), 1e-2)
      result = _MI([e], [h], [False], [True],
                   k=6,
                   estimate_method=method,
                   seed=20)
      # NOTE(review): delta=1e+2 is likely a typo for 1e-2 — verify.
      self.assertAlmostEqual(result, 4, delta=1e+2)
      result = _MI([g], [h], [False], [True],
                   k=6,
                   estimate_method=method,
                   seed=20)
      # NOTE(review): delta=1e+2 is likely a typo for 1e-2 — verify.
      self.assertAlmostEqual(result, 3, delta=1e+2)
      result = _MI([a, b], [b, a], [True, False], [False, True],
                   estimate_method=method,
                   seed=20)
      # NOTE(review): delta=1e+2 makes this assertion nearly vacuous — verify.
      self.assertAlmostEqual(result, 13.15, delta=1e+2)

  def testCategoricalOrdinal(self):
    np.random.seed(3)
    # Feature B has PDF 3/4 in [0, 1] vs 1/4 in [1, 2], and differential entropy
    # H(B) = - 3/4 * log(3/4) - 1/4 * log(1/4)
    # while, given A, it has conditional entropy
    # H(B | A) = 1/2 * H(B | A == 0) + 1/2 * H(B | A == 1)
    # H(B | A) = 1/2 * 0. - 1/2 * log(1/2) = - 1/2 * log(1/2)
    # hence their mutual information is
    # I(A, B) = H(B) - H(B | A) = - 3/4 * log(3/4)
    # using whatever log base we're using, in this case base 2.
    a = np.array([i % 2 for i in range(1000)])
    b = np.array([np.random.random() * (1. + i % 2) for i in range(1000)])
    filt = np.array([True if i % 2 else False for i in range(1000)])
    for method in ['smaller_data', 'larger_data']:
      self.assertAlmostEqual(
          -0.75 * np.log2(0.75),
          _MI([a], [b], [True], [False], estimate_method=method, seed=20),
          delta=2e-2)
      # If we filter out 1 of the 2 A labels however, no information is left.
      self.assertEqual(
          0.,
          _MI([a], [b], [True], [False],
              estimate_method=method,
              seed=20,
              filter_feature=filt))

  def testAdjustedMutualInformation(self):
    np.random.seed(11)
    # f0 is pure noise w.r.t. the label: raw MI is inflated by chance
    # agreement, while the adjusted MI should be ~0.
    f0 = np.random.randint(0, 10000, 10000)
    label = np.array([0, 1] * 5000)
    result = mutual_information_util.mutual_information([f0], [label], [True],
                                                        [True],
                                                        seed=11)
    adjusted_result = _AMI([f0], [label], [True], [True], seed=11)
    self.assertAlmostEqual(result, 0.625, delta=2e-2)
    self.assertAlmostEqual(adjusted_result, 0.0, delta=2e-2)

  def testMergeCategorical(self):
    # Parallel categorical arrays are merged element-wise with ':' separators.
    actual = mutual_information_util._merge_categorical([
        np.array(['a', 'b', 'c']),
        np.array(['1', '2', '3']),
        np.array(['alpha', 'beta', 'gamma'])
    ])
    self.assertTrue(
        np.array_equal(
            np.array([b'a:1:alpha', b'b:2:beta', b'c:3:gamma']), actual))

  def testEntropyD(self):
    discrete_f = np.array(['foo', 'bar', 'baz', 'foo'])
    entropy, each = mutual_information_util._entropy_discrete(
        discrete_f, np.ones_like(discrete_f, dtype=float))
    # 'foo' has probability 1/2; 'bar' and 'baz' have 1/4 each.
    expected_entropy = -(np.log2(0.5) * 0.5 + np.log2(0.25) * 0.25 * 2)
    expected_each = np.array(
        [-np.log2(0.5), -np.log2(0.25), -np.log2(0.25), -np.log2(0.5)])
    self.assertTrue(np.allclose(expected_entropy, entropy, atol=1e-5))
    self.assertTrue(np.allclose(expected_each, each, atol=1e-5))

  def testReplaceNoneC(self):
    # NaNs are replaced by a value derived from the data range plus _NONE_NUM.
    arr = np.array([1.0, 2.0, np.nan])
    expected = np.array(
        [1.0, 2.0, 2 * 2.0 - 1.0 + mutual_information_util._NONE_NUM])
    actual = mutual_information_util._replace_none_categorical(arr)
    self.assertTrue(np.array_equal(expected, actual))

  def testUnitVarianceScale(self):
    arr = np.array([1.0, 2.0, np.nan])
    actual = mutual_information_util._unit_variance_scale(arr)
    # Scaling uses the sample standard deviation (ddof=1) of non-NaN values.
    stdev = np.std([1.0, 2.0], ddof=1)
    self.assertTrue(
        np.allclose(
            np.array([(1.0 - 1.5) / stdev, (2 - 1.5) / stdev]),
            actual[~np.isnan(actual)],
            atol=1e-5))

  def testUnitVarianceScale_UniformValues(self):
    # A constant feature scales to zeros; NaNs are preserved.
    arr = np.array([1.0, 1.0, np.nan])
    expected = np.array([0.0, 0.0, np.nan])
    actual = mutual_information_util._unit_variance_scale(arr)
    np.testing.assert_equal(actual[np.isnan(actual)],
                            expected[np.isnan(expected)])
    self.assertTrue(
        np.allclose(
            expected[~np.isnan(expected)], actual[~np.isnan(actual)],
            atol=1e-5))

  def testFeatureToNumpyArray(self):
    # Ordinal features: None becomes NaN.
    feat = np.array([1.0, 2.0, None])
    expected = np.array([1.0, 2.0, np.nan])
    actual = mutual_information_util._fill_missing_values(feat, False)
    np.testing.assert_equal(actual[np.isnan(actual)],
                            expected[np.isnan(expected)])
    np.testing.assert_equal(expected, actual)
    # Categorical features: values stay as objects, None becomes NaN.
    feat = np.array([b'a', b'b', None])
    expected = np.array([b'a', b'b', np.nan], dtype=object)
    actual = mutual_information_util._fill_missing_values(feat, True)
    self.assertEqual([
        i for i, v in enumerate(actual) if isinstance(v, float) and np.isnan(v)
    ], [
        i for i, v in enumerate(expected)
        if isinstance(v, float) and np.isnan(v)
    ])
    self.assertEqual([v for v in actual if not isinstance(v, float)],
                     [v for v in expected if not isinstance(v, float)])

  def testDiscreteLabelsAppearingExactlyOnce(self):
    # All-unique discrete tuples make the estimate meaningless and must raise.
    feat0 = np.arange(10)
    feat1 = np.arange(10, 20).astype(int)
    with self.assertRaisesRegex(
        ValueError, '.* tuples .* discrete features .* are all unique.*'):
      mutual_information_util._mi_for_arrays([feat0], [], [], [feat1],
                                             np.ones_like(feat1))
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  absltest.main()
| |
# Copyright (c) 2015-2022 Vector 35 Inc
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import traceback
import ctypes
import abc
# Binary Ninja components
import binaryninja
from .log import log_error
from . import databuffer
from . import _binaryninjacore as core
from .enums import TransformType
class _TransformMetaClass(type):
	"""Metaclass that makes the ``Transform`` class iterable and indexable
	by name, backed by the transform registry in the Binary Ninja core."""
	def __iter__(cls):
		binaryninja._init_plugins()
		total = ctypes.c_ulonglong()
		handles = core.BNGetTransformTypeList(total)
		assert handles is not None, "core.BNGetTransformTypeList returned None"
		try:
			# Lazily wrap each registered transform handle.
			idx = 0
			while idx < total.value:
				yield Transform(handles[idx])
				idx += 1
		finally:
			# The core allocated the list; release it even if iteration stops early.
			core.BNFreeTransformTypeList(handles)

	def __getitem__(cls, name):
		binaryninja._init_plugins()
		handle = core.BNGetTransformByName(name)
		if handle is None:
			raise KeyError("'%s' is not a valid transform" % str(name))
		return Transform(handle)
class TransformParameter:
	"""Describes one parameter accepted by a :class:`Transform`.

	:param name: short identifier for the parameter
	:param long_name: human-readable name; defaults to ``name``
	:param fixed_length: required length in bytes, or 0 when variable-length
	"""
	def __init__(self, name, long_name=None, fixed_length=0):
		self._name = name
		self._long_name = name if long_name is None else long_name
		self._fixed_length = fixed_length

	def __repr__(self):
		return f"<TransformParameter: {self._long_name} fixed length: {self._fixed_length}>"

	@property
	def name(self):
		"""Short parameter identifier (read-only)"""
		return self._name

	@property
	def long_name(self):
		"""Human-readable parameter name (read-only)"""
		return self._long_name

	@property
	def fixed_length(self):
		"""Required length in bytes, 0 when variable (read-only)"""
		return self._fixed_length
class Transform(metaclass=_TransformMetaClass):
"""
``class Transform`` allows users to implement custom transformations. New transformations may be added at runtime,
so an instance of a transform is created like::
>>> list(Transform)
[<transform: Zlib>, <transform: StringEscape>, <transform: RawHex>, <transform: HexDump>, <transform: Base64>, <transform: Reverse>, <transform: CArray08>, <transform: CArrayA16>, <transform: CArrayA32>, <transform: CArrayA64>, <transform: CArrayB16>, <transform: CArrayB32>, <transform: CArrayB64>, <transform: IntList08>, <transform: IntListA16>, <transform: IntListA32>, <transform: IntListA64>, <transform: IntListB16>, <transform: IntListB32>, <transform: IntListB64>, <transform: MD4>, <transform: MD5>, <transform: SHA1>, <transform: SHA224>, <transform: SHA256>, <transform: SHA384>, <transform: SHA512>, <transform: AES-128 ECB>, <transform: AES-128 CBC>, <transform: AES-256 ECB>, <transform: AES-256 CBC>, <transform: DES ECB>, <transform: DES CBC>, <transform: Triple DES ECB>, <transform: Triple DES CBC>, <transform: RC2 ECB>, <transform: RC2 CBC>, <transform: Blowfish ECB>, <transform: Blowfish CBC>, <transform: CAST ECB>, <transform: CAST CBC>, <transform: RC4>, <transform: XOR>]
>>> sha512=Transform['SHA512']
>>> rawhex=Transform['RawHex']
>>> rawhex.encode(sha512.encode("test string"))
'10e6d647af44624442f388c2c14a787ff8b17e6165b83d767ec047768d8cbcb71a1a3226e7cc7816bc79c0427d94a9da688c41a3992c7bf5e4d7cc3e0be5dbac'
Note that some transformations take additional parameters (most notably encryption ones that require a 'key' parameter passed via a dict):
>>> xor=Transform['XOR']
>>> rawhex=Transform['RawHex']
>>> xor.encode("Original Data", {'key':'XORKEY'})
>>> rawhex.encode(xor.encode("Original Data", {'key':'XORKEY'}))
b'173d3b2c2c373923720f242d39'
"""
transform_type = None
name = None
long_name = None
group = None
parameters = []
_registered_cb = None
	def __init__(self, handle):
		"""Wrap an existing core transform handle, or — when ``handle`` is
		None — build the C callback table for a custom Python transform that
		is about to be registered via :meth:`register`.
		"""
		if handle is None:
			# Custom transform: wire up ctypes callbacks. Keeping them on
			# self prevents the function pointers from being garbage collected.
			self._cb = core.BNCustomTransform()
			self._cb.context = 0
			self._cb.getParameters = self._cb.getParameters.__class__(self._get_parameters)
			self._cb.freeParameters = self._cb.freeParameters.__class__(self._free_parameters)
			self._cb.decode = self._cb.decode.__class__(self._decode)
			self._cb.encode = self._cb.encode.__class__(self._encode)
			# Maps raw pointer value -> (c_void_p, buffer) so parameter arrays
			# handed to the core stay alive until _free_parameters is called.
			self._pending_param_lists = {}
			self.type = self.__class__.transform_type
			if not isinstance(self.type, str):
				assert self.type is not None, "Transform Type is None"
				self.type = TransformType(self.type)
			self.name = self.__class__.name
			self.long_name = self.__class__.long_name
			self.group = self.__class__.group
			self.parameters = self.__class__.parameters
		else:
			# Existing core transform: mirror its metadata into Python.
			self.handle = handle
			self.type = TransformType(core.BNGetTransformType(self.handle))
			self.name = core.BNGetTransformName(self.handle)
			self.long_name = core.BNGetTransformLongName(self.handle)
			self.group = core.BNGetTransformGroup(self.handle)
			count = ctypes.c_ulonglong()
			params = core.BNGetTransformParameterList(self.handle, count)
			assert params is not None, "core.BNGetTransformParameterList returned None"
			self.parameters = []
			for i in range(0, count.value):
				self.parameters.append(TransformParameter(params[i].name, params[i].longName, params[i].fixedLength))
			core.BNFreeTransformParameterList(params, count.value)
def __repr__(self):
return "<transform: %s>" % self.name
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return ctypes.addressof(self.handle.contents) == ctypes.addressof(other.handle.contents)
def __ne__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return not (self == other)
def __hash__(self):
return hash(ctypes.addressof(self.handle.contents))
@classmethod
def register(cls):
binaryninja._init_plugins()
if cls.name is None:
raise ValueError("transform 'name' is not defined")
if cls.long_name is None:
cls.long_name = cls.name
if cls.transform_type is None:
raise ValueError("transform 'transform_type' is not defined")
if cls.group is None:
cls.group = ""
xform = cls(None)
cls._registered_cb = xform._cb
xform.handle = core.BNRegisterTransformType(cls.transform_type, cls.name, cls.long_name, cls.group, xform._cb)
	def _get_parameters(self, ctxt, count):
		"""Core callback: describe self.parameters as a C array.

		Writes the number of parameters into ``count`` and returns the raw
		pointer value of a ``BNTransformParameterInfo`` array (None on error).
		"""
		try:
			count[0] = len(self.parameters)
			param_buf = (core.BNTransformParameterInfo * len(self.parameters))()
			for i in range(0, len(self.parameters)):
				param_buf[i].name = self.parameters[i].name
				param_buf[i].longName = self.parameters[i].long_name
				param_buf[i].fixedLength = self.parameters[i].fixed_length
			result = ctypes.cast(param_buf, ctypes.c_void_p)
			# Keep both the pointer and the buffer alive until the core
			# releases them through _free_parameters.
			self._pending_param_lists[result.value] = (result, param_buf)
			return result.value
		except:
			# Exceptions must not cross the FFI boundary; log and report nothing.
			log_error(traceback.format_exc())
			count[0] = 0
			return None
	def _free_parameters(self, params, count):
		"""Core callback: release a parameter array handed out by
		``_get_parameters``.

		Dropping the entry from ``self._pending_param_lists`` lets Python
		garbage-collect the ctypes buffer.
		"""
		try:
			buf = ctypes.cast(params, ctypes.c_void_p)
			if buf.value not in self._pending_param_lists:
				raise ValueError("freeing parameter list that wasn't allocated")
			del self._pending_param_lists[buf.value]
		except:
			# Never let an exception cross the C callback boundary.
			log_error(traceback.format_exc())
	def _decode(self, ctxt, input_buf, output_buf, params, count):
		"""Core callback: unpack C buffers and dispatch to ``perform_decode``.

		The input and parameter buffers are duplicated because the core owns
		the originals.  Builds a name -> bytes parameter map, and on success
		copies the decoded bytes into ``output_buf``.  Returns True on
		success, False otherwise.
		"""
		try:
			input_obj = databuffer.DataBuffer(handle=core.BNDuplicateDataBuffer(input_buf))
			param_map = {}
			for i in range(0, count):
				data = databuffer.DataBuffer(handle=core.BNDuplicateDataBuffer(params[i].value))
				param_map[params[i].name] = bytes(data)
			result = self.perform_decode(bytes(input_obj), param_map)
			if result is None:
				return False
			result = bytes(result)
			core.BNSetDataBufferContents(output_buf, result, len(result))
			return True
		except:
			# Never let an exception cross the C callback boundary.
			log_error(traceback.format_exc())
			return False
	def _encode(self, ctxt, input_buf, output_buf, params, count):
		"""Core callback: unpack C buffers and dispatch to ``perform_encode``.

		Mirror of ``_decode``: duplicates core-owned buffers, builds a
		name -> bytes parameter map, writes the encoded result into
		``output_buf``.  Returns True on success, False otherwise.
		"""
		try:
			input_obj = databuffer.DataBuffer(handle=core.BNDuplicateDataBuffer(input_buf))
			param_map = {}
			for i in range(0, count):
				data = databuffer.DataBuffer(handle=core.BNDuplicateDataBuffer(params[i].value))
				param_map[params[i].name] = bytes(data)
			result = self.perform_encode(bytes(input_obj), param_map)
			if result is None:
				return False
			result = bytes(result)
			core.BNSetDataBufferContents(output_buf, result, len(result))
			return True
		except:
			# Never let an exception cross the C callback boundary.
			log_error(traceback.format_exc())
			return False
	@abc.abstractmethod
	def perform_decode(self, data, params):
		"""Decode ``data`` (bytes) using ``params`` (dict of name -> bytes).

		Default implementation: for inverting transforms decoding is the same
		operation as encoding, so delegate to ``perform_encode``; otherwise
		signal failure by returning None.  Subclasses override as needed.
		"""
		if self.type == TransformType.InvertingTransform:
			return self.perform_encode(data, params)
		return None
	@abc.abstractmethod
	def perform_encode(self, data, params):
		"""Encode ``data`` (bytes) using ``params`` (dict of name -> bytes).

		Must be overridden by subclasses; the default returns None, which the
		callback layer reports to the core as failure.
		"""
		return None
def decode(self, input_buf, params={}):
if isinstance(input_buf, int) or isinstance(input_buf, int):
return None
input_buf = databuffer.DataBuffer(input_buf)
output_buf = databuffer.DataBuffer()
keys = list(params.keys())
param_buf = (core.BNTransformParameter * len(keys))()
data = []
for i in range(0, len(keys)):
data.append(databuffer.DataBuffer(params[keys[i]]))
param_buf[i].name = keys[i]
param_buf[i].value = data[i].handle
if not core.BNDecode(self.handle, input_buf.handle, output_buf.handle, param_buf, len(keys)):
return None
return bytes(output_buf)
def encode(self, input_buf, params={}):
if isinstance(input_buf, int) or isinstance(input_buf, int):
return None
input_buf = databuffer.DataBuffer(input_buf)
output_buf = databuffer.DataBuffer()
keys = list(params.keys())
param_buf = (core.BNTransformParameter * len(keys))()
data = []
for i in range(0, len(keys)):
data.append(databuffer.DataBuffer(params[keys[i]]))
param_buf[i].name = keys[i]
param_buf[i].value = data[i].handle
if not core.BNEncode(self.handle, input_buf.handle, output_buf.handle, param_buf, len(keys)):
return None
return bytes(output_buf)
| |
# Copyright 2013 Dell Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo.config import cfg
from neutron.agent.common import config as a_cfg
from neutron.tests import base
from neutron.tests.unit import test_api_v2
import neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas as fwaas
_uuid = test_api_v2._uuid
# Canned values used to build fake firewall rules throughout these tests.
FAKE_SRC_PREFIX = '10.0.0.0/24'
FAKE_DST_PREFIX = '20.0.0.0/24'
FAKE_PROTOCOL = 'tcp'
FAKE_SRC_PORT = 5000
FAKE_DST_PORT = 22
FAKE_FW_ID = 'fake-fw-uuid'
class IptablesFwaasTestCase(base.BaseTestCase):
    """Unit tests for the iptables FWaaS driver.

    The iptables manager class and the agent's ``utils.execute`` are patched
    out, so these tests only verify the chain/rule calls the driver issues
    against the mocked per-router filter tables.
    """

    def setUp(self):
        super(IptablesFwaasTestCase, self).setUp()
        cfg.CONF.register_opts(a_cfg.ROOT_HELPER_OPTS, 'AGENT')
        # Never shell out for real during tests.
        self.utils_exec_p = mock.patch(
            'neutron.agent.linux.utils.execute')
        self.utils_exec = self.utils_exec_p.start()
        self.iptables_cls_p = mock.patch(
            'neutron.agent.linux.iptables_manager.IptablesManager')
        self.iptables_cls_p.start()
        self.firewall = fwaas.IptablesFwaasDriver()

    def _fake_rules_v4(self, fwid, apply_list):
        """Return two IPv4 rules (one allow, one deny) and pre-populate each
        router's mocked v4 filter table with the firewall's chain names."""
        rule_list = []
        rule1 = {'enabled': True,
                 'action': 'allow',
                 'ip_version': 4,
                 'protocol': 'tcp',
                 'destination_port': '80',
                 'source_ip_address': '10.24.4.2'}
        rule2 = {'enabled': True,
                 'action': 'deny',
                 'ip_version': 4,
                 'protocol': 'tcp',
                 'destination_port': '22'}
        # The driver truncates chain names to 11 characters.
        ingress_chain = ('iv4%s' % fwid)[:11]
        egress_chain = ('ov4%s' % fwid)[:11]
        for router_info_inst in apply_list:
            v4filter_inst = router_info_inst.iptables_manager.ipv4['filter']
            v4filter_inst.chains.append(ingress_chain)
            v4filter_inst.chains.append(egress_chain)
        rule_list.append(rule1)
        rule_list.append(rule2)
        return rule_list

    def _fake_firewall_no_rule(self):
        """Firewall dict with admin state up and an empty rule list."""
        rule_list = []
        fw_inst = {'id': FAKE_FW_ID,
                   'admin_state_up': True,
                   'tenant_id': 'tenant-uuid',
                   'firewall_rule_list': rule_list}
        return fw_inst

    def _fake_firewall(self, rule_list):
        """Firewall dict wrapping *rule_list* with admin state up."""
        fw_inst = {'id': FAKE_FW_ID,
                   'admin_state_up': True,
                   'tenant_id': 'tenant-uuid',
                   'firewall_rule_list': rule_list}
        return fw_inst

    def _fake_firewall_with_admin_down(self, rule_list):
        """Firewall dict wrapping *rule_list* with admin state DOWN."""
        fw_inst = {'id': FAKE_FW_ID,
                   'admin_state_up': False,
                   'tenant_id': 'tenant-uuid',
                   'firewall_rule_list': rule_list}
        return fw_inst

    def _fake_apply_list(self, router_count=1, distributed=False,
                         distributed_mode=None):
        """Build *router_count* mocked router-info objects.

        Each carries a mocked iptables manager with empty v4/v6 filter chain
        lists; for 'dvr' mode a floating-IP namespace count is set as well.
        """
        apply_list = []
        while router_count > 0:
            iptables_inst = mock.Mock()
            router_inst = {'distributed': distributed}
            v4filter_inst = mock.Mock()
            v6filter_inst = mock.Mock()
            v4filter_inst.chains = []
            v6filter_inst.chains = []
            iptables_inst.ipv4 = {'filter': v4filter_inst}
            iptables_inst.ipv6 = {'filter': v6filter_inst}
            router_info_inst = mock.Mock()
            router_info_inst.iptables_manager = iptables_inst
            router_info_inst.snat_iptables_manager = iptables_inst
            if distributed_mode == 'dvr':
                router_info_inst.dist_fip_count = 1
            router_info_inst.router = router_inst
            apply_list.append(router_info_inst)
            router_count -= 1
        return apply_list

    def _setup_firewall_with_rules(self, func, router_count=1,
                                   distributed=False, distributed_mode=None):
        """Run *func* (create/update) and assert the expected iptables calls.

        The expected FORWARD-chain interface prefix depends on the mode:
        'sg-+' for dvr_snat, 'rfp-+' for dvr, and 'qr-+' for legacy routers.
        """
        apply_list = self._fake_apply_list(router_count=router_count,
            distributed=distributed, distributed_mode=distributed_mode)
        rule_list = self._fake_rules_v4(FAKE_FW_ID, apply_list)
        firewall = self._fake_firewall(rule_list)
        if distributed:
            if distributed_mode == 'dvr_snat':
                if_prefix = 'sg-+'
            if distributed_mode == 'dvr':
                if_prefix = 'rfp-+'
        else:
            if_prefix = 'qr-+'
            distributed_mode = 'legacy'
        func(distributed_mode, apply_list, firewall)
        invalid_rule = '-m state --state INVALID -j DROP'
        est_rule = '-m state --state ESTABLISHED,RELATED -j ACCEPT'
        rule1 = '-p tcp --dport 80 -s 10.24.4.2 -j ACCEPT'
        rule2 = '-p tcp --dport 22 -j DROP'
        ingress_chain = 'iv4%s' % firewall['id']
        egress_chain = 'ov4%s' % firewall['id']
        bname = fwaas.iptables_manager.binary_name
        # Wrapped chain names are "<binary name>-<chain truncated to 11>".
        ipt_mgr_ichain = '%s-%s' % (bname, ingress_chain[:11])
        ipt_mgr_echain = '%s-%s' % (bname, egress_chain[:11])
        for router_info_inst in apply_list:
            v4filter_inst = router_info_inst.iptables_manager.ipv4['filter']
            calls = [mock.call.remove_chain('iv4fake-fw-uuid'),
                     mock.call.remove_chain('ov4fake-fw-uuid'),
                     mock.call.remove_chain('fwaas-default-policy'),
                     mock.call.add_chain('fwaas-default-policy'),
                     mock.call.add_rule('fwaas-default-policy', '-j DROP'),
                     mock.call.add_chain(ingress_chain),
                     mock.call.add_rule(ingress_chain, invalid_rule),
                     mock.call.add_rule(ingress_chain, est_rule),
                     mock.call.add_chain(egress_chain),
                     mock.call.add_rule(egress_chain, invalid_rule),
                     mock.call.add_rule(egress_chain, est_rule),
                     mock.call.add_rule(ingress_chain, rule1),
                     mock.call.add_rule(egress_chain, rule1),
                     mock.call.add_rule(ingress_chain, rule2),
                     mock.call.add_rule(egress_chain, rule2),
                     mock.call.add_rule('FORWARD',
                                        '-o %s -j %s' % (if_prefix,
                                                         ipt_mgr_ichain)),
                     mock.call.add_rule('FORWARD',
                                        '-i %s -j %s' % (if_prefix,
                                                         ipt_mgr_echain)),
                     mock.call.add_rule('FORWARD',
                                        '-o %s -j %s-fwaas-defau' % (if_prefix,
                                                                     bname)),
                     mock.call.add_rule('FORWARD',
                                        '-i %s -j %s-fwaas-defau' % (if_prefix,
                                                                     bname))]
            v4filter_inst.assert_has_calls(calls)

    def test_create_firewall_no_rules(self):
        """A rule-less firewall still installs default chains for v4 and v6."""
        apply_list = self._fake_apply_list()
        firewall = self._fake_firewall_no_rule()
        self.firewall.create_firewall('legacy', apply_list, firewall)
        invalid_rule = '-m state --state INVALID -j DROP'
        est_rule = '-m state --state ESTABLISHED,RELATED -j ACCEPT'
        bname = fwaas.iptables_manager.binary_name
        for ip_version in (4, 6):
            ingress_chain = ('iv%s%s' % (ip_version, firewall['id']))
            egress_chain = ('ov%s%s' % (ip_version, firewall['id']))
            calls = [mock.call.remove_chain(
                     'iv%sfake-fw-uuid' % ip_version),
                     mock.call.remove_chain(
                     'ov%sfake-fw-uuid' % ip_version),
                     mock.call.remove_chain('fwaas-default-policy'),
                     mock.call.add_chain('fwaas-default-policy'),
                     mock.call.add_rule('fwaas-default-policy', '-j DROP'),
                     mock.call.add_chain(ingress_chain),
                     mock.call.add_rule(ingress_chain, invalid_rule),
                     mock.call.add_rule(ingress_chain, est_rule),
                     mock.call.add_chain(egress_chain),
                     mock.call.add_rule(egress_chain, invalid_rule),
                     mock.call.add_rule(egress_chain, est_rule),
                     mock.call.add_rule('FORWARD',
                                        '-o qr-+ -j %s-fwaas-defau' % bname),
                     mock.call.add_rule('FORWARD',
                                        '-i qr-+ -j %s-fwaas-defau' % bname)]
            if ip_version == 4:
                v4filter_inst = apply_list[0].iptables_manager.ipv4['filter']
                v4filter_inst.assert_has_calls(calls)
            else:
                v6filter_inst = apply_list[0].iptables_manager.ipv6['filter']
                v6filter_inst.assert_has_calls(calls)

    def test_create_firewall_with_rules(self):
        self._setup_firewall_with_rules(self.firewall.create_firewall)

    def test_create_firewall_with_rules_two_routers(self):
        self._setup_firewall_with_rules(self.firewall.create_firewall,
                                        router_count=2)

    def test_update_firewall_with_rules(self):
        self._setup_firewall_with_rules(self.firewall.update_firewall)

    def test_delete_firewall(self):
        """Deleting a firewall must remove its chains and the default policy."""
        apply_list = self._fake_apply_list()
        firewall = self._fake_firewall_no_rule()
        self.firewall.delete_firewall('legacy', apply_list, firewall)
        ingress_chain = 'iv4%s' % firewall['id']
        egress_chain = 'ov4%s' % firewall['id']
        calls = [mock.call.remove_chain(ingress_chain),
                 mock.call.remove_chain(egress_chain),
                 mock.call.remove_chain('fwaas-default-policy')]
        apply_list[0].iptables_manager.ipv4['filter'].assert_has_calls(calls)

    def test_create_firewall_with_admin_down(self):
        """With admin state down, only the drop-all default policy is added."""
        apply_list = self._fake_apply_list()
        rule_list = self._fake_rules_v4(FAKE_FW_ID, apply_list)
        firewall = self._fake_firewall_with_admin_down(rule_list)
        self.firewall.create_firewall('legacy', apply_list, firewall)
        calls = [mock.call.remove_chain('iv4fake-fw-uuid'),
                 mock.call.remove_chain('ov4fake-fw-uuid'),
                 mock.call.remove_chain('fwaas-default-policy'),
                 mock.call.add_chain('fwaas-default-policy'),
                 mock.call.add_rule('fwaas-default-policy', '-j DROP')]
        apply_list[0].iptables_manager.ipv4['filter'].assert_has_calls(calls)

    def test_create_firewall_with_rules_dvr_snat(self):
        self._setup_firewall_with_rules(self.firewall.create_firewall,
            distributed=True, distributed_mode='dvr_snat')

    def test_update_firewall_with_rules_dvr_snat(self):
        self._setup_firewall_with_rules(self.firewall.update_firewall,
            distributed=True, distributed_mode='dvr_snat')

    def test_create_firewall_with_rules_dvr(self):
        self._setup_firewall_with_rules(self.firewall.create_firewall,
            distributed=True, distributed_mode='dvr')

    def test_update_firewall_with_rules_dvr(self):
        self._setup_firewall_with_rules(self.firewall.update_firewall,
            distributed=True, distributed_mode='dvr')
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from nvd3 import lineChart
from nvd3 import lineWithFocusChart
from nvd3 import stackedAreaChart
from nvd3 import multiBarHorizontalChart
from nvd3 import linePlusBarChart
from nvd3 import cumulativeLineChart
from nvd3 import scatterChart
from nvd3 import discreteBarChart
from nvd3 import pieChart
from nvd3 import multiBarChart
from nvd3 import linePlusBarWithFocusChart
from nvd3.NVD3Chart import stab
from nvd3.translator import Function, AnonymousFunction, Assignment
import random
import unittest
import datetime
import time
class ChartTest(unittest.TestCase):
    """Smoke tests for every python-nvd3 chart type.

    Each test builds a chart with synthetic (partly random) series and calls
    ``buildhtml()``; success means no exception is raised during HTML
    generation.  Exact output is not asserted except where noted.
    """

    def test_chartWithBadName(self):
        # Chart names containing spaces must be sanitized by the library.
        name = "Chart with spaces"
        chart = lineChart(name=name, date=True, height=350)
        chart.buildhtml()
        assert(" " not in chart.name)
        assert("spaces" in chart.name)

    def test_lineWithFocusChart(self):
        """Test Line With Focus Chart"""
        type = "lineWithFocusChart"
        chart = lineWithFocusChart(name=type, date=True, height=350)
        nb_element = 100
        # Millisecond epoch timestamps spaced 100s apart.
        xdata = list(range(nb_element))
        xdata = [1365026400000 + x * 100000 for x in xdata]
        ydata = [i + random.randint(-10, 10) for i in range(nb_element)]
        ydata2 = [x * 2 for x in ydata]
        chart.add_serie(y=ydata, x=xdata)
        chart.add_serie(y=ydata2, x=xdata)
        chart.buildhtml()

    def test_lineChart(self):
        """Test Line Chart"""
        type = "lineChart"
        chart = lineChart(name=type, date=True, height=350)
        nb_element = 100
        xdata = list(range(nb_element))
        xdata = [1365026400000 + x * 100000 for x in xdata]
        ydata = [i + random.randint(1, 10) for i in range(nb_element)]
        ydata2 = [x * 2 for x in ydata]
        chart.add_serie(y=ydata, x=xdata)
        chart.add_serie(y=ydata2, x=xdata)
        chart.buildhtml()
        #extra tests: the partial builders must also run cleanly
        chart.buildcontent()
        chart.buildhtmlheader()

    def test_lineChart_tooltip(self):
        """Test Line Chart with custom per-serie tooltips and colors."""
        type = "lineChart"
        chart = lineChart(name=type, date=True, height=350)
        nb_element = 100
        xdata = list(range(nb_element))
        xdata = [1365026400000 + x * 100000 for x in xdata]
        ydata = [i + random.randint(1, 10) for i in range(nb_element)]
        ydata2 = [x * 2 for x in ydata]
        kwargs1 = {'color': 'green'}
        kwargs2 = {'color': 'red'}
        extra_serie = {"tooltip": {"y_start": "There is ", "y_end": " random values"}}
        chart.add_serie(name="Random X-Axis", y=ydata, x=xdata, extra=extra_serie, **kwargs1)
        extra_serie = {"tooltip": {"y_start": "", "y_end": " double values"}}
        chart.add_serie(name="Double X-Axis", y=ydata2, x=xdata, extra=extra_serie, **kwargs2)
        chart.buildhtml()

    def test_linePlusBarChart(self):
        """Test line Plus Bar Chart"""
        type = "linePlusBarChart"
        chart = linePlusBarChart(name=type, date=True, height=350)
        start_time = int(time.mktime(datetime.datetime(2012, 6, 1).timetuple()) * 1000)
        nb_element = 100
        xdata = list(range(nb_element))
        xdata = [start_time + x * 1000000000 for x in xdata]
        ydata = [i + random.randint(1, 10) for i in range(nb_element)]
        ydata2 = [i + random.randint(1, 10) for i in reversed(list(range(nb_element)))]
        kwargs = {}
        # First serie is rendered as bars, second as a line.
        kwargs['bar'] = True
        chart.add_serie(y=ydata, x=xdata, **kwargs)
        chart.add_serie(y=ydata2, x=xdata)
        chart.buildhtml()

    def test_stackedAreaChart(self):
        """Test Stacked Area Chart"""
        type = "stackedAreaChart"
        chart = stackedAreaChart(name=type, height=400)
        nb_element = 100
        xdata = list(range(nb_element))
        xdata = [100 + x for x in xdata]
        ydata = [i + random.randint(1, 10) for i in range(nb_element)]
        ydata2 = [x * 2 for x in ydata]
        chart.add_serie(y=ydata, x=xdata)
        chart.add_serie(y=ydata2, x=xdata)
        chart.buildhtml()

    def test_MultiBarChart(self):
        """Test Multi Bar Chart"""
        type = "MultiBarChart"
        chart = multiBarChart(name=type, height=400)
        nb_element = 10
        xdata = list(range(nb_element))
        ydata = [random.randint(1, 10) for i in range(nb_element)]
        extra = {"type": "bar", "yaxis": 1}
        chart.add_serie(y=ydata, x=xdata, extra=extra)
        chart.buildhtml()

    def test_multiBarHorizontalChart(self):
        """Test multi Bar Horizontal Chart"""
        type = "multiBarHorizontalChart"
        chart = multiBarHorizontalChart(name=type, height=350)
        nb_element = 10
        xdata = list(range(nb_element))
        ydata = [random.randint(-10, 10) for i in range(nb_element)]
        ydata2 = [x * 2 for x in ydata]
        chart.add_serie(y=ydata, x=xdata)
        chart.add_serie(y=ydata2, x=xdata)
        chart.buildhtml()

    def test_cumulativeLineChart(self):
        """Test Cumulative Line Chart"""
        type = "cumulativeLineChart"
        chart = cumulativeLineChart(name=type, height=400)
        start_time = int(time.mktime(datetime.datetime(2012, 6, 1).timetuple()) * 1000)
        nb_element = 100
        xdata = list(range(nb_element))
        xdata = [start_time + x * 1000000000 for x in xdata]
        ydata = [i + random.randint(1, 10) for i in range(nb_element)]
        ydata2 = [x * 2 for x in ydata]
        chart.add_serie(y=ydata, x=xdata)
        chart.add_serie(y=ydata2, x=xdata)
        chart.buildhtml()

    def test_scatterChart(self):
        """Test Scatter Chart with per-serie shape and size options."""
        type = "scatterChart"
        chart = scatterChart(name=type, date=True, height=350)
        nb_element = 100
        xdata = [i + random.randint(1, 10) for i in range(nb_element)]
        ydata = [i * random.randint(1, 10) for i in range(nb_element)]
        ydata2 = [x * 2 for x in ydata]
        ydata3 = [x * 5 for x in ydata]
        kwargs1 = {'shape': 'circle', 'size': '1'}
        kwargs2 = {'shape': 'cross', 'size': '10'}
        kwargs3 = {'shape': 'triangle-up', 'size': '100'}
        chart.add_serie(y=ydata, x=xdata, **kwargs1)
        chart.add_serie(y=ydata2, x=xdata, **kwargs2)
        chart.add_serie(y=ydata3, x=xdata, **kwargs3)
        chart.buildhtml()

    def test_discreteBarChart(self):
        """Test discrete Bar Chart"""
        type = "discreteBarChart"
        chart = discreteBarChart(name=type, height=350)
        xdata = ["A", "B", "C", "D", "E", "F", "G"]
        ydata = [3, 12, -10, 5, 35, -7, 2]
        chart.add_serie(y=ydata, x=xdata)
        chart.buildhtml()
        # We don't modify the xAxis, so make sure that it's not invoked.
        assert("chart.xAxis" not in chart.htmlcontent)

    def test_pieChart(self):
        """Test Pie Chart"""
        type = "pieChart"
        chart = pieChart(name=type, color_category='category20c', height=400, width=400)
        xdata = ["Orange", "Banana", "Pear", "Kiwi", "Apple", "Strawberry", "Pineapple"]
        color_list = ['orange', 'yellow', '#C5E946', '#95b43f', 'red', '#FF2259', '#F6A641']
        extra_serie = {"tooltip": {"y_start": "", "y_end": " cal"}, "color_list": color_list}
        ydata = [3, 4, 0, 1, 5, 7, 3]
        chart.add_serie(y=ydata, x=xdata, extra=extra_serie)
        chart.buildhtml()

    def test_donutPieChart(self):
        """Test Donut Pie Chart"""
        type = "pieChart"
        chart = pieChart(name=type, height=400, width=400, donut=True, donutRatio=0.2)
        xdata = ["Orange", "Banana", "Pear", "Kiwi", "Apple", "Strawberry", "Pineapple"]
        ydata = [3, 4, 0, 1, 5, 7, 3]
        chart.add_serie(y=ydata, x=xdata)
        chart.buildhtml()

    def test_lineplusbarwithfocuschart(self):
        "Test LinePlusBar With FocusChart"
        type = "linePlusBarWithFocusChart"
        chart = linePlusBarWithFocusChart(name=type, color_category='category20b',
                                          x_is_date=True, x_axis_format="%d %b %Y")
        chart.set_containerheader("\n\n<h2>" + type + "</h2>\n\n")
        nb_element = 100
        xdata = list(range(nb_element))
        start_time = int(time.mktime(datetime.datetime(2012, 6, 1).timetuple()) * 1000)
        #prepare series
        xdata = [start_time + x * 1000000000 for x in xdata]
        ydata = [i + random.randint(-10, 10) for i in range(nb_element)]
        ydata2 = [200 - i + random.randint(-10, 10) for i in range(nb_element)]
        extra_serie_1 = {
            "tooltip": {"y_start": "$ ", "y_end": ""},
            "date_format": "%d %b %Y",
        }
        kwargs = {"bar": "true"}
        chart.add_serie(name="serie 1", y=ydata, x=xdata, extra=extra_serie_1, **kwargs)
        extra_serie_2 = {
            "tooltip": {"y_start": "$ ", "y_end": ""},
            "date_format": "%d %b %Y",
        }
        chart.add_serie(name="serie 2", y=ydata2, x=xdata, extra=extra_serie_2)
        chart.buildhtml()
class FuncTest(unittest.TestCase):
    """Unit test for the ``stab()`` indentation helper from NVD3Chart."""

    def test_stab(self):
        """stab(1) must yield exactly one indentation unit."""
        indent = stab(1)
        self.assertEqual("    ", indent)
class TranslatorTest(unittest.TestCase):
    """Checks that the JS-translator objects serialize to the expected code."""

    def test_pieChart(self):
        """A nested pieChart construction must render as one JS statement."""
        label_accessor = AnonymousFunction('d', 'return d.label;')
        value_accessor = AnonymousFunction('d', 'return d.value;')
        model = Function('nv').models.pieChart().x(label_accessor).y(
            value_accessor).showLabels('true')
        func = Function('nv').addGraph(
            AnonymousFunction('', Assignment('chart', model)))
        expected = ('nv.addGraph(function() { var chart = '
                    'nv.models.pieChart().x(function(d) { return d.label; '
                    '}).y(function(d) { return d.value; }).showLabels(true); })')
        self.assertEqual(str(func), expected)
# Allow running this suite directly from the command line.
if __name__ == '__main__':
    unittest.main()
# Usage
# python tests.py -v
| |
# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import uuid
import ddt
from zaqar.tests.functional import base
from zaqar.tests.functional import helpers
@ddt.ddt
class TestMessages(base.V1FunctionalTestBase):
    """Functional tests for the Zaqar v1 messages API.

    Each test runs against a freshly created queue; ``setUp`` builds the
    queue and points the HTTP client at its ``/messages`` resource.  The
    ``tags`` attribute attached after each test method is consumed by the
    functional-test runner for filtering.
    """

    server_class = base.ZaqarServer

    def setUp(self):
        super(TestMessages, self).setUp()
        # A uuid1 queue name keeps concurrent runs isolated.
        self.queue = uuid.uuid1()
        self.queue_url = ("{url}/{version}/queues/{queue}".format(
            url=self.cfg.zaqar.url,
            version="v1",
            queue=self.queue))
        self.client.put(self.queue_url)
        self.message_url = self.queue_url + '/messages'
        self.client.set_base_url(self.message_url)

    def tearDown(self):
        self.client.delete(self.queue_url)
        super(TestMessages, self).tearDown()

    def _post_large_bulk_insert(self, offset):
        """Insert just under than max allowed messages."""
        message1 = {"body": '', "ttl": 300}
        message2 = {"body": '', "ttl": 120}
        doc = [message1, message2]
        # Split the remaining budget (post-size limit minus JSON overhead)
        # between the two bodies; *offset* nudges the total over/under limit.
        overhead = len(json.dumps(doc))
        half_size = (self.limits.max_messages_post_size - overhead) // 2
        message1['body'] = helpers.generate_random_string(half_size)
        message2['body'] = helpers.generate_random_string(half_size + offset)
        return self.client.post(data=doc)

    def test_message_single_insert(self):
        """Insert Single Message into the Queue.

        This test also verifies that claimed messages are
        retuned (or not) depending on the include_claimed flag.
        """
        doc = helpers.create_message_body(messagecount=1)

        result = self.client.post(data=doc)
        self.assertEqual(201, result.status_code)
        response_headers = set(result.headers.keys())
        self.assertIsSubset(self.headers_response_with_body, response_headers)

        # GET on posted message
        href = result.json()['resources'][0]
        url = self.cfg.zaqar.url + href

        result = self.client.get(url)
        self.assertEqual(200, result.status_code)

        # Compare message metadata
        result_body = result.json()['body']
        posted_metadata = doc[0]['body']
        self.assertEqual(posted_metadata, result_body)

        # Post a claim & verify the include_claimed flag.
        url = self.queue_url + '/claims'
        doc = {"ttl": 300, "grace": 100}
        result = self.client.post(url, data=doc)
        self.assertEqual(201, result.status_code)

        params = {'include_claimed': True,
                  'echo': True}
        result = self.client.get(params=params)
        self.assertEqual(200, result.status_code)

        response_message_body = result.json()["messages"][0]["body"]
        self.assertEqual(posted_metadata, response_message_body)

        # By default, include_claimed = false
        result = self.client.get(self.message_url)
        self.assertEqual(204, result.status_code)

    test_message_single_insert.tags = ['smoke', 'positive']

    def test_message_bulk_insert(self):
        """Bulk Insert Messages into the Queue."""
        message_count = self.limits.max_messages_per_page
        doc = helpers.create_message_body(messagecount=message_count)

        result = self.client.post(data=doc)
        self.assertEqual(201, result.status_code)

        # GET on posted messages
        location = result.headers['location']
        url = self.cfg.zaqar.url + location
        result = self.client.get(url)
        self.assertEqual(200, result.status_code)

        self.skipTest('Bug #1273335 - Get set of messages returns wrong hrefs '
                      '(happens randomly)')

        # Verify that the response json schema matches the expected schema
        self.assertSchema(result.json(), 'message_get_many')

        # Compare message metadata
        result_body = [result.json()[i]['body']
                       for i in range(len(result.json()))]
        result_body.sort()

        posted_metadata = [doc[i]['body']
                           for i in range(message_count)]
        posted_metadata.sort()

        self.assertEqual(posted_metadata, result_body)

    test_message_bulk_insert.tags = ['smoke', 'positive']

    @ddt.data({}, {'limit': 5})
    def test_get_message(self, params):
        """Get Messages."""
        # The default page size is 10 unless a limit is requested.
        expected_msg_count = params.get('limit', 10)

        # Test Setup
        doc = helpers.create_message_body(
            messagecount=self.limits.max_messages_per_page)
        result = self.client.post(data=doc)
        self.assertEqual(201, result.status_code)

        url = ''
        params['echo'] = True

        # Follow the hrefs & perform GET, till the end of messages i.e. http
        # 204
        while result.status_code in [201, 200]:
            result = self.client.get(url, params=params)
            self.assertIn(result.status_code, [200, 204])

            if result.status_code == 200:
                actual_msg_count = len(result.json()['messages'])
                self.assertMessageCount(actual_msg_count, expected_msg_count)

                self.assertSchema(result.json(), 'message_list')

                href = result.json()['links'][0]['href']
                url = self.cfg.zaqar.url + href

        self.assertEqual(204, result.status_code)

    test_get_message.tags = ['smoke', 'positive']

    def test_message_delete(self):
        """Delete Message."""
        # Test Setup
        doc = helpers.create_message_body(messagecount=1)
        result = self.client.post(data=doc)
        self.assertEqual(201, result.status_code)

        # Delete posted message
        href = result.json()['resources'][0]
        url = self.cfg.zaqar.url + href

        result = self.client.delete(url)
        self.assertEqual(204, result.status_code)

        result = self.client.get(url)
        self.assertEqual(404, result.status_code)

    test_message_delete.tags = ['smoke', 'positive']

    def test_message_bulk_delete(self):
        """Bulk Delete Messages."""
        doc = helpers.create_message_body(messagecount=10)
        result = self.client.post(data=doc)
        self.assertEqual(201, result.status_code)

        # Delete posted messages
        location = result.headers['Location']
        url = self.cfg.zaqar.url + location

        result = self.client.delete(url)
        self.assertEqual(204, result.status_code)

        result = self.client.get(url)
        self.assertEqual(204, result.status_code)

    test_message_bulk_delete.tags = ['smoke', 'positive']

    def test_message_delete_nonexisting(self):
        """Delete non-existing Messages."""
        result = self.client.delete('/non-existing')
        self.assertEqual(204, result.status_code)

    test_message_delete_nonexisting.tags = ['negative']

    def test_message_partial_delete(self):
        """Delete Messages will be partially successful."""
        doc = helpers.create_message_body(messagecount=3)
        result = self.client.post(data=doc)
        self.assertEqual(201, result.status_code)

        # Delete posted message
        location = result.headers['Location']
        url = self.cfg.zaqar.url + location
        url += ',nonexisting'
        result = self.client.delete(url)
        self.assertEqual(204, result.status_code)

    test_message_partial_delete.tags = ['negative']

    def test_message_partial_get(self):
        """Get Messages will be partially successful."""
        doc = helpers.create_message_body(messagecount=3)
        result = self.client.post(data=doc)
        self.assertEqual(201, result.status_code)

        # Get posted message and a nonexisting message
        location = result.headers['Location']
        url = self.cfg.zaqar.url + location
        url += ',nonexisting'
        result = self.client.get(url)
        self.assertEqual(200, result.status_code)

        self.assertSchema(result.json(), "message_get_many")

    test_message_partial_get.tags = ['negative']

    @ddt.data(-10, -1, 0)
    def test_message_bulk_insert_large_bodies(self, offset):
        """Insert just under than max allowed messages."""
        result = self._post_large_bulk_insert(offset)
        self.assertEqual(201, result.status_code)

    test_message_bulk_insert_large_bodies.tags = ['positive']

    @ddt.data(1, 10)
    def test_message_bulk_insert_large_bodies_(self, offset):
        """Insert just under than max allowed messages."""
        result = self._post_large_bulk_insert(offset)
        self.assertEqual(400, result.status_code)

    test_message_bulk_insert_large_bodies_.tags = ['negative']

    def test_message_bulk_insert_oversized(self):
        """Insert more than max allowed size."""

        doc = '[{{"body": "{0}", "ttl": 300}}, {{"body": "{1}", "ttl": 120}}]'
        overhead = len(doc.format('', ''))

        half_size = (self.limits.max_messages_post_size - overhead) // 2
        doc = doc.format(helpers.generate_random_string(half_size),
                         helpers.generate_random_string(half_size + 1))

        result = self.client.post(data=doc)
        self.assertEqual(400, result.status_code)

    test_message_bulk_insert_oversized.tags = ['negative']

    @ddt.data(10000000000000000000, -100, 0, 30, -10000000000000000000)
    def test_message_get_invalid_limit(self, limit):
        """Get Messages with invalid value for limit.

        Allowed values for limit are 0 < limit <= 20(configurable).
        """
        params = {'limit': limit}
        result = self.client.get(params=params)
        self.assertEqual(400, result.status_code)

    test_message_get_invalid_limit.tags = ['negative']

    def test_message_bulk_delete_negative(self):
        """Delete more messages than allowed in a single request.

        By default, max messages that can be deleted in a single
        request is 20.
        """
        url = (self.message_url + '?ids=' +
               ','.join(str(i) for i in
                        range(self.limits.max_messages_per_page + 1)))
        result = self.client.delete(url)

        self.assertEqual(400, result.status_code)

    test_message_bulk_delete_negative.tags = ['negative']

    def test_message_bulk_get_negative(self):
        """GET more messages by id than allowed in a single request.

        By default, max messages that can be fetched in a single
        request is 20.
        """
        url = (self.message_url + '?ids=' +
               ','.join(str(i) for i in
                        range(self.limits.max_messages_per_page + 1)))

        result = self.client.get(url)

        self.assertEqual(400, result.status_code)

    test_message_bulk_get_negative.tags = ['negative']

    def test_get_messages_malformed_marker(self):
        """Get messages with non-existing marker."""
        url = self.message_url + '?marker=invalid'

        result = self.client.get(url)
        self.assertEqual(204, result.status_code)

    test_get_messages_malformed_marker.tags = ['negative']

    @ddt.data(None, '1234', 'aa2-bb3',
              '103e09c6-31b7-11e3-86bc-b8ca3ad0f5d81',
              '103e09c6-31b7-11e3-86bc-b8ca3ad0f5d')
    def test_get_messages_invalid_client_id(self, client_id):
        """Get messages with invalid client id."""
        url = self.message_url

        header = helpers.create_zaqar_headers(self.cfg)
        header['Client-ID'] = client_id

        result = self.client.get(url, headers=header)
        self.assertEqual(400, result.status_code)

    test_get_messages_invalid_client_id.tags = ['negative']

    def test_query_non_existing_message(self):
        """Get Non Existing Message."""
        path = '/non-existing-message'
        result = self.client.get(path)
        self.assertEqual(404, result.status_code)

    test_query_non_existing_message.tags = ['negative']

    def test_query_non_existing_message_set(self):
        """Get Set of Non Existing Messages."""
        path = '?ids=not_there1,not_there2'
        result = self.client.get(path)
        self.assertEqual(204, result.status_code)

    test_query_non_existing_message_set.tags = ['negative']

    def test_delete_non_existing_message(self):
        """Delete Non Existing Message."""
        path = '/non-existing-message'
        result = self.client.delete(path)
        self.assertEqual(204, result.status_code)

    test_delete_non_existing_message.tags = ['negative']
| |
from random import random, choice
from apiritif import random_string
from bzt.modules.aggregator import ConsolidatingAggregator, DataPoint, KPISet, SAMPLE_STATES, AGGREGATED_STATES
from bzt.utils import to_json, BetterDict
from tests.unit import BZTestCase, EngineEmul
from tests.unit.mocks import r, MockReader, MockListener
def get_success_reader(offset=0):
    """Build a MockReader preloaded with successful (HTTP 200) samples."""
    reader = MockReader()
    # (timestamp, label) pairs; the trailing 5-second sample is deliberately
    # appended out of order after the 6-second ones.
    samples = [
        (1, "first"), (2, "second"), (2, "first"), (3, "second"),
        (3, "first"), (4, "third"), (4, "first"), (6, "second"),
        (6, "third"), (6, "first"), (5, "first"),
    ]
    for tstamp, label in samples:
        reader.data.append((tstamp + offset, label, 1, r(), r(), r(), 200, None, '', 0))
    return reader
def get_success_reader_alot(prefix='', offset=0):
    """Build a MockReader with ~100 passing samples carrying random labels."""
    reader = MockReader()
    for tstamp in range(2, 100):
        label_len = 1 + int(random() * tstamp)
        label = prefix + random_string(label_len)
        reader.data.append((tstamp + offset, label, 1, r(), r(), r(), 200, '', '', 0))
    return reader
def get_success_reader_selected_labels(offset=0):
    """Build a MockReader whose samples pick randomly from five fixed URLs.

    :param offset: shift applied to every sample timestamp
    :return: the populated MockReader
    """
    labels = [
        'http://blazedemo.com/reserve.php',
        'http://blazedemo.com/purchase.php',
        'http://blazedemo.com/vacation.html',
        'http://blazedemo.com/confirmation.php',
        'http://blazedemo.com/another.php',
    ]
    mock = MockReader()
    for second in range(2, 200):
        mock.data.append((second + offset, choice(labels), 1, r(), r(), r(), 200, '', '', 0))
    return mock
def random_url(target_len):
    """Return a URL of exactly ``target_len`` characters, padded with a random query value."""
    prefix = 'http://site.com/?foo='
    return prefix + random_string(target_len - len(prefix))
def get_success_reader_shrinking_labels(max_label_size=20, count=500):
    """Build a MockReader whose generated URL labels gradually shrink in length.

    Labels shrink linearly from ``max_label_size`` down to roughly half of it
    over ``count`` samples.
    """
    mock = MockReader()
    shrink_range = max_label_size // 2
    for second in range(2, count):
        shrink = int(float(shrink_range) * float(second) / float(count))
        mock.data.append((second, random_url(max_label_size - shrink), 1, r(), r(), r(), 200, '', '', 0))
    return mock
def get_fail_reader(offset=0):
    """Build a MockReader mixing failed and successful samples for error-aggregation tests.

    :param offset: shift applied to every sample timestamp
    :return: the populated MockReader
    """
    mock = MockReader()
    # (timestamp, label, error-message-or-None) triples
    samples = [
        (1, "first", 'FAILx3'),
        (2, "first", 'FAILx1'),
        (5, "first", None),
        (7, "second", 'FAILx3'),
        (3, "first", 'FAILx3'),
        (6, "second", 'unique FAIL'),
    ]
    for tstamp, label, error in samples:
        mock.data.append((tstamp + offset, label, 1, r(), r(), r(), 200, error, '', 0))
    return mock
def get_fail_reader_alot(offset=0):
    """Build a MockReader with many samples carrying randomized error strings.

    :param offset: shift applied to every sample timestamp
    :return: the populated MockReader
    """
    mock = MockReader()
    for second in range(2, 200):
        # pick the error-string length before building the tuple, like the sample data expects
        rnd_len = 1 + int(random() * second)
        mock.data.append((second + offset, "first", 1, r(), r(), r(), 200, (random_string(rnd_len)), '', 0))
    return mock
class TestTools(BZTestCase):
    """Sanity checks for the mock reader helper and DataPoint merge arithmetic."""

    def test_mock(self):
        # the mock reader must emit buffered datapoints first, then the final flush
        reader = get_success_reader()
        reader.buffer_scale_idx = '90.0'
        buffered = list(reader.datapoints())
        final = list(reader.datapoints(True))
        self.assertEquals([1, 2, 3, 4], [point[DataPoint.TIMESTAMP] for point in buffered])
        self.assertEquals([5, 6], [point[DataPoint.TIMESTAMP] for point in final])
        for point in buffered + final:
            self.assertIn("", point[DataPoint.CURRENT])

    def test_merging(self):
        # repeatedly merging the same source must keep sum_rt and avg_rt consistent
        target = DataPoint(0)
        source = DataPoint(0)
        source[DataPoint.CUMULATIVE].setdefault('', KPISet())
        source[DataPoint.CUMULATIVE][''].sum_rt = 0.5
        source[DataPoint.CUMULATIVE][''][KPISet.SAMPLE_COUNT] = 1
        target.merge_point(source)
        self.assertEquals(0.5, target[DataPoint.CUMULATIVE][''].sum_rt)
        self.assertEquals(0.5, target[DataPoint.CUMULATIVE][''][KPISet.AVG_RESP_TIME])
        source[DataPoint.CUMULATIVE][''][KPISet.SAMPLE_COUNT] = 3
        target.merge_point(source)
        self.assertEquals(4, target[DataPoint.CUMULATIVE][''][KPISet.SAMPLE_COUNT])
        self.assertEquals(1, target[DataPoint.CUMULATIVE][''].sum_rt)
        self.assertEquals(0.25, target[DataPoint.CUMULATIVE][''][KPISet.AVG_RESP_TIME])
        source[DataPoint.CUMULATIVE][''][KPISet.SAMPLE_COUNT] = 6
        target.merge_point(source)
        self.assertEquals(10, target[DataPoint.CUMULATIVE][''][KPISet.SAMPLE_COUNT])
        self.assertEquals(1.5, target[DataPoint.CUMULATIVE][''].sum_rt)
        self.assertEquals(0.15, target[DataPoint.CUMULATIVE][''][KPISet.AVG_RESP_TIME])
class TestConsolidatingAggregator(BZTestCase):
    """Tests for ConsolidatingAggregator: merging underling readers,
    label folding/generalization, error aggregation and datapoint output.

    NOTE(review): these tests drive the aggregator's full lifecycle
    (prepare/startup/check/shutdown/post_process) in a fixed order, so the
    statement sequence inside each test is significant.
    """

    def setUp(self):
        super(TestConsolidatingAggregator, self).setUp()
        self.obj = ConsolidatingAggregator()
        self.obj.engine = EngineEmul()
        self.obj.engine.aggregator = self.obj

    def test_extend_data_avg(self):
        """Extended aggregation: per-label success averages roll up into the '' overall entry."""
        self.obj.settings['extend-aggregation'] = True
        reader = MockReader()
        watcher = MockReader()
        reader.buffer_scale_idx = '100.0'
        # data format: t_stamp, label, conc, r_time, con_time, latency, r_code, error, trname, byte_count
        reader.data.append((1, "a", 1, 1, 1, 1, '200', None, '', 1))
        reader.data.append((2, "b", 1, 2, 2, 2, '200', 'OK', '', 2))
        reader.data.append((2, "b", 1, 3, 3, 3, '404', "Not Found", '', 3))
        reader.data.append((2, "c", 1, 4, 4, 4, '200', None, '', 4))
        reader.data.append((3, "d", 1, 5, 5, 5, '200', None, '', 5))
        reader.data.append((5, "b", 1, 6, 6, 6, '200', None, '', 6))
        reader.data.append((5, "c", 1, 7, 7, 7, '200', None, '', 7))
        self.obj.add_underling(reader)
        self.obj.add_listener(watcher)
        self.obj.prepare()
        self.obj.startup()
        self.obj.check()
        self.obj.shutdown()
        self.obj.post_process()
        converted_data = [self.obj.converter(dp) for dp in watcher.results]
        # single-label second: label average must equal the overall average
        a, overall = (converted_data[0]["current"][key]["success"]["avg_rt"] for key in ("a", ""))
        self.assertEqual(a, overall)
        # two-label second: overall is the mean of the per-label averages
        b, c, overall = (converted_data[-1]["current"][key]["success"]["avg_rt"] for key in ("b", "c", ""))
        self.assertEqual(overall, (b + c) / 2.0)

    def test_extend_data(self):
        """Extended aggregation emits only known labels and only allowed state keys."""
        # test migrated from taurus-cloud (LDJSONExtractor tests)
        # check aggregated results for the following hierarchy:
        # {...
        #   'current': {
        #       <label>:
        #           {'success':{..}, 'http_errors':{..}, 'jmeter_errors':{..},
        #            'success_jmeter_errors':{..}, 'http_errors_jmeter_errors':{..}, 'success_http_errors':{..},
        #            '':{..}}},
        #       '': <the same states>}    # end of 'current' record
        #  ...}
        self.obj.settings['extend-aggregation'] = True
        reader = MockReader()
        watcher = MockListener()
        watcher.engine = self.obj.engine
        reader.buffer_scale_idx = '100.0'
        # data format: t_stamp, label, conc, r_time, con_time, latency, r_code, error, trname, byte_count
        reader.data.append((1, "a", 1, 1, 1, 1, '200', None, '', 1))
        reader.data.append((2, "b", 1, 2, 2, 2, '200', 'OK', '', 2))
        reader.data.append((2, "b", 1, 3, 3, 3, '404', "Not Found", '', 3))
        reader.data.append((2, "c", 1, 4, 4, 4, '200', None, '', 4))
        reader.data.append((3, "d", 1, 5, 5, 5, '200', None, '', 5))
        reader.data.append((5, "b", 1, 6, 6, 6, '200', None, '', 6))
        reader.data.append((5, "c", 1, 7, 7, 7, '200', None, '', 7))
        original_labels = list(d[1] for d in reader.data)
        self.obj.add_underling(reader)
        self.obj.add_listener(watcher)
        self.obj.prepare()
        self.obj.startup()
        self.obj.check()
        self.obj.shutdown()
        self.obj.post_process()
        self.assertEqual(4, len(watcher.results))
        allowed_states = set(SAMPLE_STATES + AGGREGATED_STATES + (ConsolidatingAggregator.OVERALL_STATE,))
        for dp in watcher.results:
            written_kpis = dp['current']
            for label in written_kpis:
                self.assertIn(label, original_labels + [''], f"Wrong original label: {label}")
                for state in written_kpis[label].keys():
                    self.assertIn(state, allowed_states, f"Wrong state '{state}' for label '{label}'")

    def test_two_executions(self):
        """Two identical underlings: concurrency doubles and both appear as subresults."""
        self.obj.track_percentiles = [0, 50, 100]
        self.obj.prepare()
        underling1 = get_success_reader()
        underling2 = get_success_reader()
        self.obj.add_underling(underling1)
        self.obj.add_underling(underling2)
        cnt = 0
        for _ in range(1, 10):
            for point in self.obj.datapoints():
                self.assertEqual(2, len(point[DataPoint.SUBRESULTS]))
                overall = point[DataPoint.CURRENT]['']
                self.assertEquals(2, overall[KPISet.CONCURRENCY])
                self.assertGreater(overall[KPISet.PERCENTILES]["100.0"], 0)
                self.assertGreater(overall[KPISet.AVG_RESP_TIME], 0)
                cnt += 1
        self.assertEquals(2, cnt)

    def test_new_aggregator(self):
        """With set_aggregation(True) each label is split by state into '<label>-<state>' keys."""
        # aggregator's config
        self.obj.set_aggregation(True)
        reader = MockReader()
        watcher = MockReader()
        # executor/reporter prepare level
        self.obj.add_underling(reader)
        self.obj.add_listener(watcher)
        # send rules to underlings
        self.obj.startup()
        reader.buffer_scale_idx = '100.0'
        # data format: t_stamp, label, conc, r_time, con_time, latency, r_code, error, trname, byte_count
        reader.data.append((1, "a", 1, 1, 1, 1, '200', None, '', 0))
        reader.data.append((2, "b", 1, 2, 2, 2, '200', 'OK', '', 0))
        reader.data.append((2, "b", 1, 3, 3, 3, '404', "Not Found", '', 0))
        reader.data.append((2, "c", 1, 4, 4, 4, '200', None, '', 0))
        reader.data.append((3, "d", 1, 5, 5, 5, '200', None, '', 0))
        reader.data.append((4, "b", 1, 6, 6, 6, '200', None, '', 0))
        # let's collect data to seconds and send something aggregated to watcher
        self.obj.shutdown()
        self.obj.post_process()
        data_points = watcher.results[-1][DataPoint.CUMULATIVE]
        self.assertEquals(7, len(data_points))
        sample_labels = {'a-success', 'b-success', 'b-jmeter_errors', 'b-http_errors', 'c-success', 'd-success', ''}
        self.assertEquals(sample_labels, set(data_points.keys()))

    def test_errors_cumulative(self):
        """Error objects are not shared between labels, and per-label fail counts match error counts."""
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        reader = get_fail_reader()
        self.obj.add_underling(reader)
        self.obj.shutdown()
        self.obj.post_process()
        cum_dict = self.obj.underlings[0].cumulative
        first_err_ids = [id(err) for err in cum_dict['first']['errors']]
        second_err_ids = [id(err) for err in cum_dict['second']['errors']]
        total_err_ids = [id(err) for err in cum_dict['']['errors']]
        all_ids = first_err_ids + second_err_ids + total_err_ids
        # identity check: every label must hold its own copy of each error record
        self.assertEqual(len(all_ids), len(set(all_ids)))
        for label in cum_dict:
            data = cum_dict[label]
            total_errors_count = sum(err['cnt'] for err in data['errors'])
            self.assertEqual(data['fail'], total_errors_count)

    def test_labels_variety(self):
        """Label folding keeps the number of distinct labels near generalize_labels."""
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        reader1 = get_success_reader()
        reader2 = get_success_reader_alot()
        self.obj.log.info(len(reader1.data) + len(reader2.data))
        self.obj.generalize_labels = 25
        self.obj.add_underling(reader1)
        self.obj.add_underling(reader2)
        self.obj.shutdown()
        self.obj.post_process()
        cum_dict = self.obj.cumulative
        len_limit = (self.obj.generalize_labels + 1)  # due to randomness, it can go a bit higher than the limit
        labels = list(cum_dict.keys())
        self.assertGreaterEqual(len(labels), self.obj.generalize_labels / 2)  # assert that it's at least half full
        self.assertLessEqual(len(labels), len_limit + 1)  # allow +1 label because '' is cumulative

    def test_labels_constant_part(self):
        """Labels sharing a constant prefix still fold within the generalize_labels budget."""
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        reader = get_success_reader_alot(prefix='http://blazedemo.com/?r=')
        self.obj.log.info(len(reader.data))
        self.obj.generalize_labels = 25
        self.obj.add_underling(reader)
        self.obj.shutdown()
        self.obj.post_process()
        cum_dict = self.obj.cumulative
        labels = list(cum_dict.keys())
        self.assertGreaterEqual(len(labels), self.obj.generalize_labels / 2)  # assert that it's at least half full
        self.assertLessEqual(len(labels), self.obj.generalize_labels + 1)  # allow +1 label because '' is cumulative

    def test_labels_aggressive_folding(self):
        """A small fixed label set stays unfolded: 5 URLs + '' = 6 keys."""
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        reader = get_success_reader_selected_labels()
        self.obj.log.info(len(reader.data))
        self.obj.generalize_labels = 25
        self.obj.add_underling(reader)
        self.obj.shutdown()
        self.obj.post_process()
        cum_dict = self.obj.cumulative
        labels = list(cum_dict.keys())
        self.assertEqual(len(labels), 6)

    def test_labels_aggressive_folding_2(self):
        """Shrinking labels must not overflow the label budget, and the final point is its own subresult."""
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        label_count = 50
        reader = get_success_reader_shrinking_labels(max_label_size=int(label_count * 2), count=label_count)
        self.obj.log.info(len(reader.data))
        self.obj.generalize_labels = label_count
        self.obj.add_underling(reader)
        last = None
        for point in self.obj.datapoints(True):
            last = point
        cum_dict = self.obj.cumulative
        labels = list(cum_dict.keys())
        labels_count = len(labels)
        self.assertLessEqual(labels_count, label_count + 1)  # didn't overflow
        self.assertGreaterEqual(labels_count, label_count * 0.25)  # at least a quarter-filled
        self.assertEqual(1, len(last[DataPoint.SUBRESULTS]))
        self.assertEqual(last, last[DataPoint.SUBRESULTS][0])

    def test_errors_variety(self):
        """known_errors stays within max_error_count but keeps a useful amount of variety."""
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        reader1 = get_fail_reader()
        reader2 = get_fail_reader_alot()
        self.obj.max_error_count = 50
        self.obj.add_underling(reader1)
        self.obj.add_underling(reader2)
        self.obj.shutdown()
        self.obj.post_process()
        expected = self.obj.max_error_count  # due to randomness, it can go a bit higher than the limit
        self.assertLessEqual(len(self.obj.known_errors), expected)
        self.assertGreaterEqual(len(self.obj.known_errors),
                                self.obj.max_error_count / 2)  # assert that it's at least half full
    def test_uniq_errors(self):
        """Duplicate error messages are collapsed into unique cumulative error records."""
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        reader = get_fail_reader()
        self.obj.max_error_count = 9
        self.obj.add_underling(reader)
        self.obj.shutdown()
        self.obj.post_process()
        cum_dict = self.obj.cumulative
        self.assertEqual(len(cum_dict['']['errors']), 3)

    def test_set_rtimes_len(self):
        """histogram-initial setting controls the response-time histogram's upper bound."""
        self.obj.settings['histogram-initial'] = 10.0
        self.obj.prepare()
        reader = get_fail_reader()
        self.obj.add_underling(reader)
        listener = MockListener()
        listener.engine = self.obj.engine
        self.obj.add_listener(listener)
        self.obj.check()
        for dp in listener.results:
            for kpiset in dp['cumulative'].values():
                self.assertEqual(10000, kpiset[KPISet.RESP_TIMES].high)
            for kpiset in dp['current'].values():
                self.assertEqual(10000, kpiset[KPISet.RESP_TIMES].high)

    def test_inf_values(self):
        """The string 'inf' in settings parses to float infinity for max-buffer-len."""
        self.obj.settings['max-buffer-len'] = "inf"
        self.obj.prepare()
        self.assertEqual(self.obj.max_buffer_len, float("inf"))

    def test_datapoint_to_json(self):
        """Datapoints must serialize to JSON without raising."""
        self.obj.track_percentiles = [0.0, 50.0, 95.0, 99.0, 100.0]
        self.obj.prepare()
        self.obj.add_underling(get_success_reader())
        for point in self.obj.datapoints():
            if point[DataPoint.SUBRESULTS] == [point]:
                del point[DataPoint.SUBRESULTS]
            self.obj.log.info(to_json(point))

    def test_negative_response_time_scaling_crash(self):
        """Negative response times must be reported as a warning, not crash aggregation."""
        self.obj.track_percentiles = [0.0, 50.0, 95.0, 99.0, 100.0]
        self.obj.prepare()
        self.sniff_log(self.obj.log)
        mock = MockReader()
        mock.data.append((1, "first", 1, -r(), r(), r(), 200, 'FAILx3', '', 0))
        mock.data.append((2, "first", 1, -r(), r(), r(), 200, 'FAILx1', '', 0))
        mock.data.append((5, "first", 1, -r(), r(), r(), 200, None, '', 0))
        mock.data.append((7, "second", 1, -r(), r(), r(), 200, 'FAILx3', '', 0))
        mock.data.append((3, "first", 1, -r(), r(), r(), 200, 'FAILx3', '', 0))
        mock.data.append((6, "second", 1, -r(), r(), r(), 200, 'unique FAIL', '', 0))
        self.obj.add_underling(mock)
        self.obj.check()
        for point in self.obj.datapoints():
            self.obj.log.info(to_json(point))
        self.assertIn("Negative response time reported", self.log_recorder.warn_buff.getvalue())

    def test_ramp_up_exclude(self):
        """With ramp-up-exclude enabled, samples inside ramp-up are dropped from cumulative."""
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        self.obj.engine.config['settings']['ramp-up-exclude'] = True
        self.obj.engine.config['execution'] = [
            {'scenario': 'first', 'ramp-up': 50},
            {'scenario': 'second', 'ramp-up': '1s'},
            {'scenario': 'third'}
        ]
        self.obj.engine.config['scenarios'] = BetterDict.from_dict({
            'first': {'requests': [{'url': 'first'}]},
            'second': {'requests': [{'url': 'second'}]},
            'third': {'requests': [{'url': 'third'}]}})
        reader = get_success_reader()
        self.obj.add_underling(reader)
        self.obj.shutdown()
        self.obj.post_process()
        self.assertEquals(self.obj.cumulative, {})
| |
"""
file: consumers.py
desc: Implements WebSocket bindings for Django Channels.
auth: Lukas Yelle (@lxy5611)
Peter Zujko (pxz3370)
"""
import json
import string
from collections import namedtuple
import petitions.views as views
from asgiref.sync import async_to_sync
from channels.generic.websocket import JsonWebsocketConsumer
def get_petitions_and_map(petitions_obj, user=None):
    """
    Helper Function.
    Gathers and properly formats petitions for transmission back to the frontend via websocket.
    :param petitions_obj: Database queryset (or sequence) of petitions.
    :param user: optional user; when it has a ``profile``, signed petitions are flagged.
    :return: formatted object of the sent petitions: {"petitions": [ {}, ... ], "map": {id: index, ...}}.
    """
    petition_map = {}
    petitions = []
    # The user's profile does not change per petition, so resolve it once.
    profile = user.profile if hasattr(user, "profile") else False
    for index, petition in enumerate(petitions_obj):
        tags = [{"name": t.name, "id": t.id} for t in petition.tags.all()]
        updates = [{
            "description": u.description,
            "timestamp": u.created_at.strftime("%B %d, %Y")
        } for u in petition.updates.all()]
        petitions.append({
            'title': petition.title,
            # BUG FIX: the original chained .replace("'", "\'") here, which is a
            # no-op ("\'" == "'"); json.dumps already performs all JSON escaping.
            'description': json.dumps(petition.description),
            'signatures': petition.signatures,
            'author': petition.author.first_name + " " + petition.author.last_name,
            'tags': tags,
            # response is pre-serialized to a JSON string, or False when absent
            'response': json.dumps({
                'author': petition.response.author,
                'description': petition.response.description,
                'timestamp': petition.response.created_at.strftime("%B %d, %Y")
            }) if petition.response is not None else False,
            'updates': updates,
            'timestamp': petition.created_at.strftime("%B %d, %Y"),
            'expires': petition.expires.strftime("%B %d, %Y"),
            'status': petition.status,
            'in_progress': petition.in_progress,
            'isSigned': profile.petitions_signed.filter(id=petition.id).exists() if profile is not False else False,
            'deleted': False,
            'id': petition.id
        })
        # map petition id -> position in the petitions list, for frontend lookup
        petition_map[petition.id] = index
    return {
        "petitions": petitions,
        "map": petition_map
    }
def paginate(petitions, page, per_page=45):
    """Return the 1-indexed ``page`` slice of ``petitions``, ``per_page`` items per page.

    Generalized from a hard-coded page size of 45; the default keeps the
    original behavior for existing callers.
    """
    return petitions[(page - 1) * per_page:page * per_page]
class PetitionConsumer(JsonWebsocketConsumer):
    """
    WebSocket consumer for the petitions app.

    Accepts JSON commands from the frontend ('list', 'get', 'all', 'search',
    'paginate') and replies with formatted petition payloads.
    """

    def send_petitions_individually(self, petitions):
        """Send each petition to the client as its own 'get' message."""
        for single in petitions:
            # format one petition at a time; payload is double-encoded as a
            # JSON string inside the 'petition' field, as the frontend expects
            formatted = get_petitions_and_map([single], self.scope["user"])
            self.send_json({"command": "get", "petition": json.dumps(formatted)})

    def send_petitions(self, petitions, command=None):
        """Format ``petitions`` for the current user and push them down the socket.

        :param command: optional command name echoed back to the client.
        """
        user = self.scope["user"] if 'user' in self.scope else None
        payload = get_petitions_and_map(petitions, user)
        if command is not None:
            payload.update({"command": command})
        self.send_json(payload)

    def connect(self):
        """
        Endpoint for the petitions_connect route. Fires when web socket(WS) connections are made to the server.
        :return: None
        """
        self.group_name = "petitions"
        # Add the WS connection to the petitions channels group
        async_to_sync(self.channel_layer.group_add)(
            self.group_name, self.channel_name)
        # Default order is 'most recent' query the database for all petitions in that order.
        petitions = paginate(views.sorting_controller("most recent"), 1)
        self.accept()
        self.send_petitions(petitions)

    def disconnect(self, close_code):
        """
        Endpoint for the petitions_disconnect route. Fires when web socket connections are dropped.
        """
        async_to_sync(self.channel_layer.group_discard)(
            self.group_name, self.channel_name)

    def group_update(self, content):
        """Relay a group broadcast to this client."""
        self.send_json(content.get('text', ''))

    def receive_json(self, data):
        """
        Endpoint for the petitions_command route. Fires when a WS sends a message.
        Handles the parsing of commands from the frontend (an API, of sorts).
        :param data: Data sent to the websocket.
        :return: None
        """
        if data != "":
            command = data.get('command', '')
            if command != '':
                if command == 'list':
                    # Parse the List command. Required data = sort. Optional = filter.
                    # Sends the WS a sorted and optionally filtered list of petitions.
                    sort = data.get('sort', '')
                    if sort:
                        petitions = views.sorting_controller(sort)
                        if data.get('filter', ''):
                            petitions = views.filtering_controller(
                                petitions, data.get('filter'))
                        self.send_petitions(petitions)
                        return None
                    self.send_json(
                        {"text": "Error. Must send 'sort' parameter"})
                    return None
                elif command == 'get':
                    # Parse the Get command. Required data = id.
                    # Gets a single petition with a particular id.
                    data_id = data.get('id', '')
                    if data_id:
                        petition = [views.get_petition(
                            data_id, self.scope["user"])]
                        petition = get_petitions_and_map(
                            petition, self.scope["user"]) if petition[0] else False
                        reply = {
                            "command": "get",
                            "petition": petition
                        }
                        self.send_json(reply)
                        return None
                elif command == 'all':
                    # Parse the All command. No required data.
                    # Sends the WS the full list of petitions.
                    petitions = views.sorting_controller("all")
                    if petitions:
                        self.send_petitions(petitions)
                        return None
                    return None
                elif command == 'search':
                    # Parse the Search command. Required = query.
                    # Sends the WS the petitions matching the search query.
                    query = data.get('query', '')
                    if query:
                        petitions = views.sorting_controller("search", query)
                        self.send_petitions(petitions)
                        return None
                    return None
                elif command == 'paginate':
                    # Parse the Paginate command. Required: page, sort. Optional filter.
                    # Sends the WS a sorted and optionally filtered list of petitions between a range.
                    sort = data.get('sort', '')
                    page = data.get('page', '')
                    if sort and page:
                        petitions = views.sorting_controller(sort)
                        if data.get('filter', ''):
                            petitions = views.filtering_controller(
                                petitions, data.get('filter'))
                        petitions = paginate(petitions, page)
                        if len(petitions) > 0:
                            self.send_petitions(petitions, 'paginate')
                        return None
                    # BUG FIX: this branch requires both parameters, but the
                    # original message only mentioned 'sort'.
                    self.send_json(
                        {"text": "Error. Must send 'sort' and 'page' parameters"})
                    return None
            # BUG FIX: original message read "Error must sent a non-empty ..."
            self.send_json({"text": "Error. Must send a non-empty 'command' parameter"})
            return None
| |
import os
import subprocess
import time
from distutils import dir_util
from ccmlib import common as ccmcommon
from dtest import Tester, debug, create_ks, create_cf
from tools.assertions import assert_all, assert_none, assert_one
from tools.decorators import since
# WARNING: sstableloader tests should be added to TestSSTableGenerationAndLoading (below),
# and not to BaseSStableLoaderTest (which is shared with upgrade tests)
# Also used by upgrade_tests/storage_engine_upgrade_test
# to test loading legacy sstables
class BaseSStableLoaderTest(Tester):
    """
    Shared machinery for sstableloader tests: generate data, copy the
    resulting sstables aside, wipe the cluster, then re-load the copies with
    the sstableloader tool and validate the data.

    New tests should go into TestSSTableGenerationAndLoading; this base class
    is shared with upgrade tests.
    """
    __test__ = False
    upgrade_from = None      # when set, sstables are generated with this version first
    compact = False          # create the tables WITH COMPACT STORAGE
    jvm_args = ()
    allow_log_errors = True

    def create_schema(self, session, ks, compression):
        """Create the default schema: keyspace ``ks`` plus standard1/counter1 tables."""
        create_ks(session, ks, rf=2)
        create_cf(session, "standard1", compression=compression, compact_storage=self.compact)
        create_cf(session, "counter1", compression=compression, columns={'v': 'counter'},
                  compact_storage=self.compact)

    # one test per (pre_compression, post_compression) combination
    def sstableloader_compression_none_to_none_test(self):
        self.load_sstable_with_configuration(None, None)

    def sstableloader_compression_none_to_snappy_test(self):
        self.load_sstable_with_configuration(None, 'Snappy')

    def sstableloader_compression_none_to_deflate_test(self):
        self.load_sstable_with_configuration(None, 'Deflate')

    def sstableloader_compression_snappy_to_none_test(self):
        self.load_sstable_with_configuration('Snappy', None)

    def sstableloader_compression_snappy_to_snappy_test(self):
        self.load_sstable_with_configuration('Snappy', 'Snappy')

    def sstableloader_compression_snappy_to_deflate_test(self):
        self.load_sstable_with_configuration('Snappy', 'Deflate')

    def sstableloader_compression_deflate_to_none_test(self):
        self.load_sstable_with_configuration('Deflate', None)

    def sstableloader_compression_deflate_to_snappy_test(self):
        self.load_sstable_with_configuration('Deflate', 'Snappy')

    def sstableloader_compression_deflate_to_deflate_test(self):
        self.load_sstable_with_configuration('Deflate', 'Deflate')

    def sstableloader_with_mv_test(self):
        """
        @jira_ticket CASSANDRA-11275
        """
        def create_schema_with_mv(session, ks, compression):
            self.create_schema(session, ks, compression)
            # create a materialized view
            session.execute("CREATE MATERIALIZED VIEW mv1 AS "
                            "SELECT key FROM standard1 WHERE key IS NOT NULL AND c IS NOT NULL AND v IS NOT NULL "
                            "PRIMARY KEY (v)")
        self.load_sstable_with_configuration(ks='"Keyspace1"', create_schema=create_schema_with_mv)

    def copy_sstables(self, cluster, node):
        """Copy every non-system keyspace directory of ``node`` to data{N}_copy dirs."""
        for x in xrange(0, cluster.data_dir_count):
            data_dir = os.path.join(node.get_path(), 'data{0}'.format(x))
            copy_root = os.path.join(node.get_path(), 'data{0}_copy'.format(x))
            for ddir in os.listdir(data_dir):
                keyspace_dir = os.path.join(data_dir, ddir)
                if os.path.isdir(keyspace_dir) and ddir != 'system':
                    copy_dir = os.path.join(copy_root, ddir)
                    dir_util.copy_tree(keyspace_dir, copy_dir)

    def load_sstables(self, cluster, node, ks):
        """Run the sstableloader binary over every copied column-family directory of ``ks``."""
        cdir = node.get_install_dir()
        sstableloader = os.path.join(cdir, 'bin', ccmcommon.platform_binary('sstableloader'))
        env = ccmcommon.make_cassandra_env(cdir, node.get_path())
        host = node.address()
        for x in xrange(0, cluster.data_dir_count):
            sstablecopy_dir = os.path.join(node.get_path(), 'data{0}_copy'.format(x), ks.strip('"'))
            for cf_dir in os.listdir(sstablecopy_dir):
                full_cf_dir = os.path.join(sstablecopy_dir, cf_dir)
                if os.path.isdir(full_cf_dir):
                    cmd_args = [sstableloader, '--nodes', host, full_cf_dir]
                    p = subprocess.Popen(cmd_args, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=env)
                    # BUG FIX: communicate() drains both pipes (avoiding a
                    # potential deadlock p.wait() has with PIPE) and returns
                    # the actual output; the original logged p.stdout/p.stderr,
                    # which are file objects, so only their repr was printed.
                    stdout, stderr = p.communicate()
                    exit_status = p.returncode
                    debug('stdout: {out}'.format(out=stdout))
                    debug('stderr: {err}'.format(err=stderr))
                    self.assertEqual(0, exit_status,
                                     "sstableloader exited with a non-zero status: {}".format(exit_status))

    def load_sstable_with_configuration(self, pre_compression=None, post_compression=None, ks="ks", create_schema=None):
        """
        tests that the sstableloader works by using it to load data.
        Compression of the columnfamilies being loaded, and loaded into
        can be specified.
        pre_compression and post_compression can be these values:
        None, 'Snappy', or 'Deflate'.
        create_schema: optional callable (session, ks, compression) used to
        build the schema; defaults to self.create_schema.
        """
        # BUG FIX: the original accepted `create_schema` but never called it,
        # always using self.create_schema -- so the custom schema passed by
        # sstableloader_with_mv_test was silently ignored.
        schema_builder = self.create_schema if create_schema is None else create_schema
        NUM_KEYS = 1000
        for compression_option in (pre_compression, post_compression):
            self.assertIn(compression_option, (None, 'Snappy', 'Deflate'))
        debug("Testing sstableloader with pre_compression=%s and post_compression=%s" % (pre_compression, post_compression))
        if self.upgrade_from:
            debug("Testing sstableloader with upgrade_from=%s and compact=%s" % (self.upgrade_from, self.compact))
        cluster = self.cluster
        if self.upgrade_from:
            debug("Generating sstables with version %s" % (self.upgrade_from))
            default_install_dir = self.cluster.get_install_dir()
            # Forcing cluster version on purpose
            cluster.set_install_dir(version=self.upgrade_from)
        debug("Using jvm_args={}".format(self.jvm_args))
        cluster.populate(2).start(jvm_args=list(self.jvm_args))
        node1, node2 = cluster.nodelist()
        time.sleep(.5)
        debug("creating keyspace and inserting")
        session = self.cql_connection(node1)
        schema_builder(session, ks, pre_compression)
        for i in range(NUM_KEYS):
            session.execute("UPDATE standard1 SET v='{}' WHERE KEY='{}' AND c='col'".format(i, i))
            session.execute("UPDATE counter1 SET v=v+1 WHERE KEY='{}'".format(i))
        node1.nodetool('drain')
        node1.stop()
        node2.nodetool('drain')
        node2.stop()
        debug("Making a copy of the sstables")
        # make a copy of the sstables
        self.copy_sstables(cluster, node1)
        debug("Wiping out the data and restarting cluster")
        # wipe out the node data.
        cluster.clear()
        if self.upgrade_from:
            debug("Running sstableloader with version from %s" % (default_install_dir))
            # Return to previous version
            cluster.set_install_dir(install_dir=default_install_dir)
        cluster.start(jvm_args=list(self.jvm_args))
        time.sleep(5)  # let gossip figure out what is going on
        debug("re-creating the keyspace and column families.")
        session = self.cql_connection(node1)
        schema_builder(session, ks, post_compression)
        time.sleep(2)
        debug("Calling sstableloader")
        # call sstableloader to re-load each cf.
        self.load_sstables(cluster, node1, ks)

        def read_and_validate_data(session):
            # every key must come back from both the plain and the counter table
            for i in range(NUM_KEYS):
                query = "SELECT * FROM standard1 WHERE KEY='{}'".format(i)
                assert_one(session, query, [str(i), 'col', str(i)])
                query = "SELECT * FROM counter1 WHERE KEY='{}'".format(i)
                assert_one(session, query, [str(i), 1])

        debug("Reading data back")
        # Now we should have sstables with the loaded data, and the existing
        # data. Lets read it all to make sure it is all there.
        read_and_validate_data(session)
        debug("scrubbing, compacting, and repairing")
        # do some operations and try reading the data again.
        node1.nodetool('scrub')
        node1.nodetool('compact')
        node1.nodetool('repair')
        debug("Reading data back one more time")
        read_and_validate_data(session)
        # check that RewindableDataInputStreamPlus spill files are properly cleaned up
        if self.upgrade_from:
            for x in xrange(0, cluster.data_dir_count):
                data_dir = os.path.join(node1.get_path(), 'data{0}'.format(x))
                for ddir in os.listdir(data_dir):
                    keyspace_dir = os.path.join(data_dir, ddir)
                    temp_files = self.glob_data_dirs(os.path.join(keyspace_dir, '*', "tmp", "*.dat"))
                    debug("temp files: " + str(temp_files))
                    self.assertEquals(0, len(temp_files), "Temporary files were not cleaned up.")
class TestSSTableGenerationAndLoading(BaseSStableLoaderTest):
    """Concrete sstableloader tests; new sstableloader tests belong here, not in the base class."""
    __test__ = True

    # NOTE: sstableloader_with_mv_test is inherited from BaseSStableLoaderTest;
    # a byte-identical override that used to live in this class was removed as
    # dead code.

    def sstableloader_uppercase_keyspace_name_test(self):
        """
        Make sure sstableloader works with upper case keyspace
        @jira_ticket CASSANDRA-10806
        """
        self.load_sstable_with_configuration(ks='"Keyspace1"')

    def incompressible_data_in_compressed_table_test(self):
        """
        tests for the bug that caused #3370:
        https://issues.apache.org/jira/browse/CASSANDRA-3370
        @jira_ticket CASSANDRA-3370
        inserts random data into a compressed table. The compressed SSTable was
        compared to the uncompressed and was found to indeed be larger then
        uncompressed.
        """
        cluster = self.cluster
        cluster.populate(1).start()
        node1 = cluster.nodelist()[0]
        time.sleep(.5)
        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 1)
        create_cf(session, 'cf', compression="Deflate")
        # make unique column names, and values that are incompressible
        for col in xrange(10):
            col_name = str(col)
            col_val = os.urandom(5000)
            col_val = col_val.encode('hex')
            cql = "UPDATE cf SET v='%s' WHERE KEY='0' AND c='%s'" % (col_val, col_name)
            # print cql
            session.execute(cql)
        node1.flush()
        time.sleep(2)
        rows = list(session.execute("SELECT * FROM cf WHERE KEY = '0' AND c < '8'"))
        self.assertGreater(len(rows), 0)

    def remove_index_file_test(self):
        """
        tests for situations similar to that found in #343:
        https://issues.apache.org/jira/browse/CASSANDRA-343
        @jira_ticket CASSANDRA-343
        """
        cluster = self.cluster
        cluster.populate(1).start(wait_for_binary_proto=True)
        node1 = cluster.nodelist()[0]
        # Making sure the cluster is ready to accept the subsequent
        # stress connection. This was an issue on Windows.
        node1.stress(['write', 'n=10K', 'no-warmup', '-rate', 'threads=8'])
        node1.flush()
        node1.compact()
        node1.stop()
        time.sleep(1)
        paths = []
        for data_dir in node1.data_directories():
            basepath = os.path.join(data_dir, 'keyspace1')
            for x in os.listdir(basepath):
                if x.startswith("standard1"):
                    path = os.path.join(basepath, x)
                    # remove every sstable component except the data file itself
                    os.system('rm %s/*Index.db' % path)
                    os.system('rm %s/*Filter.db' % path)
                    os.system('rm %s/*Statistics.db' % path)
                    os.system('rm %s/*Digest.sha1' % path)
                    paths.append(path)
        node1.start()
        time.sleep(10)
        data_found = 0
        for path in paths:
            for fname in os.listdir(path):
                if fname.endswith('Data.db'):
                    data_found += 1
        self.assertGreater(data_found, 0, "After removing index, filter, stats, and digest files, the data file was deleted!")

    @since('4.0')
    def sstableloader_with_failing_2i_test(self):
        """
        @jira_ticket CASSANDRA-10130
        Simulates an index building failure during SSTables load.
        The table data should be loaded and the index should be marked for rebuilding during the next node start.
        """
        def create_schema_with_2i(session):
            create_ks(session, 'k', 1)
            session.execute("CREATE TABLE k.t (p int, c int, v int, PRIMARY KEY(p, c))")
            session.execute("CREATE INDEX idx ON k.t(v)")

        cluster = self.cluster
        cluster.populate(1, install_byteman=True).start(wait_for_binary_proto=True)
        node = cluster.nodelist()[0]
        session = self.patient_cql_connection(node)
        create_schema_with_2i(session)
        session.execute("INSERT INTO k.t(p, c, v) VALUES (0, 1, 8)")
        # Stop node and copy SSTables
        node.nodetool('drain')
        node.stop()
        self.copy_sstables(cluster, node)
        # Wipe out data and restart
        cluster.clear()
        cluster.start()
        # Restore the schema
        session = self.patient_cql_connection(node)
        create_schema_with_2i(session)
        # The table should exist and be empty, and the index should be empty and marked as built
        assert_one(session, """SELECT * FROM system."IndexInfo" WHERE table_name='k'""", ['k', 'idx', None])
        assert_none(session, "SELECT * FROM k.t")
        assert_none(session, "SELECT * FROM k.t WHERE v = 8")
        # Add some additional data before loading the SSTable, to check that it will be still accessible
        session.execute("INSERT INTO k.t(p, c, v) VALUES (0, 2, 8)")
        assert_one(session, "SELECT * FROM k.t", [0, 2, 8])
        assert_one(session, "SELECT * FROM k.t WHERE v = 8", [0, 2, 8])
        # Load SSTables with a failure during index creation
        node.byteman_submit(['./byteman/index_build_failure.btm'])
        with self.assertRaises(Exception):
            self.load_sstables(cluster, node, 'k')
        # Check that the index isn't marked as built and the old SSTable data has been loaded but not indexed
        assert_none(session, """SELECT * FROM system."IndexInfo" WHERE table_name='k'""")
        assert_all(session, "SELECT * FROM k.t", [[0, 1, 8], [0, 2, 8]])
        assert_one(session, "SELECT * FROM k.t WHERE v = 8", [0, 2, 8])
        # Restart the node to trigger index rebuild
        node.nodetool('drain')
        node.stop()
        cluster.start()
        session = self.patient_cql_connection(node)
        # Check that the index is marked as built and the index has been rebuilt
        assert_one(session, """SELECT * FROM system."IndexInfo" WHERE table_name='k'""", ['k', 'idx', None])
        assert_all(session, "SELECT * FROM k.t", [[0, 1, 8], [0, 2, 8]])
        assert_all(session, "SELECT * FROM k.t WHERE v = 8", [[0, 1, 8], [0, 2, 8]])
| |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v9.resources.types import (
campaign_criterion_simulation,
)
from google.ads.googleads.v9.services.types import (
campaign_criterion_simulation_service,
)
from .base import (
CampaignCriterionSimulationServiceTransport,
DEFAULT_CLIENT_INFO,
)
class CampaignCriterionSimulationServiceGrpcTransport(
    CampaignCriterionSimulationServiceTransport
):
    """gRPC backend transport for CampaignCriterionSimulationService.
    Service to fetch campaign criterion simulations.
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    def __init__(
        self,
        *,
        host: str = "googleads.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Sequence[str] = None,
        channel: grpc.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for grpc channel. It is ignored if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        self._ssl_channel_credentials = ssl_channel_credentials
        if channel:
            # Sanity check: Ensure that channel and credentials are not both
            # provided.
            # NOTE(review): ``False`` (rather than ``None``) presumably tells the
            # base transport not to fall back to default credentials — confirm
            # against the base class constructor in base.py.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        elif api_mtls_endpoint:
            # Deprecated mTLS path: build an SSL channel from the caller's cert
            # source or application-default SSL credentials.
            warnings.warn(
                "api_mtls_endpoint and client_cert_source are deprecated",
                DeprecationWarning,
            )
            host = (
                api_mtls_endpoint
                if ":" in api_mtls_endpoint
                else api_mtls_endpoint + ":443"
            )
            if credentials is None:
                credentials, _ = google.auth.default(
                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
                )
            # Create SSL credentials with client_cert_source or application
            # default SSL credentials.
            if client_cert_source:
                cert, key = client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
            else:
                ssl_credentials = SslCredentials().ssl_credentials
            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                credentials_file=credentials_file,
                ssl_credentials=ssl_credentials,
                scopes=scopes or self.AUTH_SCOPES,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            self._ssl_channel_credentials = ssl_credentials
        else:
            host = host if ":" in host else host + ":443"
            if credentials is None:
                credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
            # NOTE(review): unlike the mTLS branch above, this branch does not
            # forward ``credentials_file``, ``quota_project_id`` or the caller's
            # ``scopes`` to create_channel — this matches the generated code as
            # shipped, but verify it is intentional before relying on those args
            # here.
            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                ssl_credentials=ssl_channel_credentials,
                scopes=self.AUTH_SCOPES,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Cache of RPC stubs, created lazily in the property accessors below.
        self._stubs = {}  # type: Dict[str, Callable]
        # Run the base constructor.
        super().__init__(
            host=host, credentials=credentials, client_info=client_info,
        )
    @classmethod
    def create_channel(
        cls,
        host: str = "googleads.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        scopes: Optional[Sequence[str]] = None,
        **kwargs,
    ) -> grpc.Channel:
        """Create and return a gRPC channel object.
        Args:
            address (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.
        """
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            scopes=scopes or cls.AUTH_SCOPES,
            **kwargs,
        )
    def close(self):
        """Close the underlying gRPC channel."""
        self.grpc_channel.close()
    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.
        """
        return self._grpc_channel
    @property
    def get_campaign_criterion_simulation(
        self,
    ) -> Callable[
        [
            campaign_criterion_simulation_service.GetCampaignCriterionSimulationRequest
        ],
        campaign_criterion_simulation.CampaignCriterionSimulation,
    ]:
        r"""Return a callable for the get campaign criterion
        simulation method over gRPC.
        Returns the requested campaign criterion simulation in full
        detail.
        List of thrown errors: `AuthenticationError <>`__
        `AuthorizationError <>`__ `HeaderError <>`__
        `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
        Returns:
            Callable[[~.GetCampaignCriterionSimulationRequest],
                ~.CampaignCriterionSimulation]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_campaign_criterion_simulation" not in self._stubs:
            self._stubs[
                "get_campaign_criterion_simulation"
            ] = self.grpc_channel.unary_unary(
                "/google.ads.googleads.v9.services.CampaignCriterionSimulationService/GetCampaignCriterionSimulation",
                request_serializer=campaign_criterion_simulation_service.GetCampaignCriterionSimulationRequest.serialize,
                response_deserializer=campaign_criterion_simulation.CampaignCriterionSimulation.deserialize,
            )
        return self._stubs["get_campaign_criterion_simulation"]
# Public API of this module.
__all__ = ("CampaignCriterionSimulationServiceGrpcTransport",)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""Lists of functions whitelisted/blacklisted for automatic mixed precision in symbol API."""
from ...runtime import Features
# Functions that should be cast to lower precision
# NOTE(review): these are the dense matrix-multiply / convolution style ops;
# presumably chosen because they benefit most from fp16 — confirm against the
# AMP documentation before editing this list.
FP16_FUNCS = [
    '_linalg_gemm',
    '_linalg_gemm2',
    '_npi_einsum',
    '_npi_matmul',
    'Convolution',
    'Deconvolution',
    'FullyConnected',
    'RNN',
]
# Functions that should not be casted, either because
# they are irrelevant (not used in the network itself
# like image transformations or optimizers) or they
# are dtype neutral (can work in both fp16 and fp32)
# NOTE: keep this list sorted within its informal groupings when adding ops;
# an op listed here is left at whatever dtype its inputs already have.
FP16_FP32_FUNCS = [
    'BatchNorm',
    'BilinearSampler',
    'BlockGrad',
    'Cast',
    'cast_storage',
    '_contrib_BatchNormWithReLU',
    '_contrib_allclose',
    '_contrib_arange_like',
    '_contrib_dynamic_reshape',
    '_contrib_intgemm_fully_connected',
    '_contrib_intgemm_maxabsolute',
    '_contrib_intgemm_prepare_data',
    '_contrib_intgemm_prepare_weight',
    '_contrib_intgemm_take_weight',
    '_contrib_quantized_batch_norm',
    '_contrib_quantized_elemwise_mul',
    '_contrib_quantized_embedding',
    '_contrib_mrcnn_mask_target',
    '_contrib_round_ste',
    '_contrib_sign_ste',
    'Crop',
    'Dropout',
    'Embedding',
    'Flatten',
    'GridGenerator',
    'Pad',
    'Pooling',
    'ROIPooling',
    'Reshape',
    'SequenceLast',
    'SequenceMask',
    'SequenceReverse',
    'SliceChannel',
    'SpatialTransformer',
    'SwapAxis',
    'UpSampling',
    '_CachedOp',
    '_CachedOpThreadSafe',
    '_CrossDeviceCopy',
    '_CustomFunction',
    '_FusedOp',
    '_FusedOpHelper',
    '_FusedOpOutHelper',
    '_NoGradient',
    '_adabelief_update',
    '_adamw_update',
    '_arange',
    '_cond',
    '_contrib_AdaptiveAvgPooling2D',
    '_contrib_BilinearResize2D',
    '_contrib_bipartite_matching',
    '_contrib_dequantize',
    '_contrib_div_sqrt_dim',
    '_contrib_boolean_mask',
    '_contrib_getnnz',
    '_contrib_gradientmultiplier',
    '_contrib_group_adagrad_update',
    '_contrib_index_array',
    '_contrib_index_copy',
    '_contrib_quadratic',
    '_contrib_quantize',
    '_contrib_quantize_v2',
    '_contrib_quantized_concat',
    '_contrib_quantized_conv',
    '_contrib_quantized_flatten',
    '_contrib_quantized_fully_connected',
    '_contrib_quantized_pooling',
    '_contrib_quantized_elemwise_add',
    '_contrib_quantized_act',
    '_image_crop',
    '_linspace',
    '_contrib_requantize',
    '_copy',
    '_copyto',
    '_cvcopyMakeBorder',
    '_cvimdecode',
    '_cvimread',
    '_cvimresize',
    '_div_scalar',
    '_equal_scalar',
    '_eye',
    '_foreach',
    '_while_loop',
    '_full',
    '_grad_add',
    '_greater_scalar',
    '_greater_equal_scalar',
    '_histogram',
    '_hypot_scalar',
    '_identity_with_attr_like_rhs',
    '_image_adjust_lighting',
    '_image_flip_left_right',
    '_image_flip_top_bottom',
    '_image_normalize',
    '_image_random_brightness',
    '_image_random_color_jitter',
    '_image_random_contrast',
    '_image_random_crop',
    '_image_random_resized_crop',
    '_image_random_flip_left_right',
    '_image_random_flip_top_bottom',
    '_image_random_hue',
    '_image_random_lighting',
    '_image_random_saturation',
    '_image_resize',
    '_image_to_tensor',
    '_imdecode',
    '_lesser_scalar',
    '_lesser_equal_scalar',
    '_logical_and_scalar',
    '_logical_or_scalar',
    '_logical_xor_scalar',
    '_maximum_scalar',
    '_minimum_scalar',
    '_minus_scalar',
    '_mod_scalar',
    '_mp_adabelief_update',
    '_mp_adamw_update',
    '_mul_scalar',
    '_multi_adabelief_update',
    '_multi_adamw_update',
    '_multi_lamb_update',
    '_multi_lans_update',
    '_multi_mp_adabelief_update',
    '_multi_mp_adamw_update',
    '_multi_mp_lamb_update',
    '_multi_mp_lans_update',
    '_not_equal_scalar',
    '_np_reshape',
    '_npi_absolute',
    '_npi_add',
    '_npi_add_scalar',
    '_npi_advanced_indexing',
    '_npi_advanced_indexing_multiple',
    '_npi_all',
    '_npi_any',
    '_npi_arange',
    '_npi_arccosh',
    '_npi_arcsinh',
    '_npi_arctan',
    '_npi_arctan2',
    '_npi_arctan2_scalar',
    '_npi_argmax',
    '_npi_argmin',
    '_npi_around',
    '_npi_atleast_1d',
    '_npi_atleast_2d',
    '_npi_atleast_3d',
    '_npi_bernoulli',
    '_npi_bincount',
    '_npi_bitwise_and',
    '_npi_bitwise_and_scalar',
    '_npi_bitwise_not',
    '_npi_bitwise_or',
    '_npi_bitwise_or_scalar',
    '_npi_bitwise_xor',
    '_npi_bitwise_xor_scalar',
    '_npi_bitwise_left_shift',
    '_npi_bitwise_left_shift_scalar',
    '_npi_bitwise_right_shift',
    '_npi_bitwise_right_shift_scalar',
    '_npi_rbitwise_left_shift_scalar',
    '_npi_rbitwise_right_shift_scalar',
    '_npi_blackman',
    '_npi_boolean_mask_assign_scalar',
    '_npi_boolean_mask_assign_tensor',
    '_npi_broadcast_to',
    '_npi_cbrt',
    '_npi_ceil',
    '_npi_choice',
    '_npi_copy',
    '_npi_copysign_scalar',
    '_npi_cos',
    '_npi_degrees',
    '_npi_delete',
    '_npi_diag',
    '_npi_diag_indices_from',
    '_npi_diagflat',
    '_npi_diagonal',
    '_npi_diff',
    '_npi_dsplit',
    '_npi_equal_scalar',
    '_npi_exponential',
    '_npi_eye',
    '_npi_fill_diagonal',
    '_npi_fix',
    '_npi_flip',
    '_npi_floor',
    '_npi_fmax_scalar',
    '_npi_fmin_scalar',
    '_npi_fmod_scalar',
    '_npi_full',
    '_npi_full_like',
    '_npi_gamma',
    '_npi_greater_equal_scalar',
    '_npi_greater_scalar',
    '_npi_gumbel',
    '_npi_hamming',
    '_npi_hanning',
    '_npi_hsplit',
    '_npi_identity',
    '_npi_indices',
    '_npi_insert_scalar',
    '_npi_insert_slice',
    '_npi_insert_tensor',
    '_npi_interp',
    '_npi_isinf',
    '_npi_isfinite',
    '_npi_isnan',
    '_npi_isneginf',
    '_npi_isposinf',
    '_npi_laplace',
    '_npi_less_equal_scalar',
    '_npi_less_scalar',
    '_npi_logistic',
    '_npi_lcm',
    '_npi_lcm_scalar',
    '_npi_gcd',
    '_npi_gcd_scalar',
    '_npi_linspace',
    '_npi_logical_not',
    '_npi_logical_and_scalar',
    '_npi_logical_or_scalar',
    '_npi_logical_xor_scalar',
    '_npi_logspace',
    '_npi_max',
    '_npi_min',
    '_npi_mod',
    '_npi_mod_scalar',
    '_npi_moveaxis',
    '_npi_multinomial',
    '_npi_multiply',
    '_npi_multiply_scalar',
    '_npi_floor_divide',
    '_npi_floor_divide_scalar',
    '_npi_rfloor_divide_scalar',
    '_npi_nan_to_num',
    '_npi_negative',
    '_npi_normal',
    '_npi_normal_n',
    '_npi_not_equal_scalar',
    '_npi_ones',
    '_npi_pad',
    '_npi_pareto',
    '_npi_percentile',
    '_npi_powerd',
    '_npi_radians',
    '_npi_rarctan2_scalar',
    '_npi_rayleigh',
    '_npi_rcopysign_scalar',
    '_npi_repeats',
    '_npi_rfmod_scalar',
    '_npi_rint',
    '_npi_rmod_scalar',
    '_npi_roll',
    '_npi_rollaxis',
    '_npi_rot90',
    '_npi_rsubtract_scalar',
    '_npi_rtrue_divide_scalar',
    '_npi_share_memory',
    '_npi_sign',
    '_npi_sin',
    '_npi_sqrt',
    '_npi_squeeze',
    '_npi_subtract',
    '_npi_subtract_scalar',
    '_npi_tanh',
    '_npi_transpose',
    '_npi_tri',
    '_npi_tril',
    '_npi_tril_indices',
    '_npi_triu',
    '_npi_true_divide',
    '_npi_true_divide_scalar',
    '_npi_trunc',
    '_npi_uniform',
    '_npi_uniform_n',
    '_npi_unique',
    '_npi_weibull',
    '_npi_where_lscalar',
    '_npi_where_rscalar',
    '_npi_where_scalar2',
    '_npi_zeros',
    '_npx_constraint_check',
    '_npx_nonzero',
    '_npx_relu',
    '_npx_reshape',
    '_npx_sigmoid',
    '_npx_cond',
    '_npx_foreach',
    '_npx_while_loop',
    '_onehot_encode',
    '_ones',
    '_plus_scalar',
    '_random_exponential',
    '_random_exponential_like',
    '_random_gamma',
    '_random_gamma_like',
    '_random_binomial',
    '_random_binomial_like',
    '_random_generalized_negative_binomial',
    '_random_generalized_negative_binomial_like',
    '_random_negative_binomial',
    '_random_negative_binomial_like',
    '_random_normal',
    '_random_normal_like',
    '_random_poisson',
    '_random_poisson_like',
    '_random_randint',
    '_random_uniform',
    '_random_uniform_like',
    '_ravel_multi_index',
    '_rminus_scalar',
    '_rmod_scalar',
    '_rnn_param_concat',
    '_sample_exponential',
    '_sample_gamma',
    '_sample_binomial',
    '_sample_generalized_negative_binomial',
    '_sample_categorical',
    '_sample_multinomial',
    '_sample_negative_binomial',
    '_sample_normal',
    '_sample_poisson',
    '_sample_uniform',
    '_sample_unique_zipfian',
    '_scatter_set_nd',
    '_set_value',
    '_shuffle',
    '_slice_assign',
    '_slice_assign_scalar',
    '_sparse_adagrad_update',
    '_sparse_retain',
    '_split_v2',
    '_unravel_index',
    '_zeros',
    '_zeros_without_dtype',
    'abs',
    'adam_update',
    'all_finite',
    'amp_cast',
    'amp_multicast',
    'arccosh',
    'arcsinh',
    'arctan',
    'argmax',
    'argmax_channel',
    'argmin',
    'batch_take',
    'broadcast_axis',
    'broadcast_like',
    'broadcast_to',
    'cbrt',
    'ceil',
    'clip',
    'col2im',
    'cos',
    'degrees',
    'depth_to_space',
    'diag',
    'erf',
    'expand_dims',
    'fill_element_0index',
    'fix',
    'floor',
    'ftml_update',
    'ftrl_update',
    'gather_nd',
    'hard_sigmoid',
    'im2col',
    'lamb_update_phase1',
    'lamb_update_phase2',
    'logical_not',
    'log_sigmoid',
    'max',
    'min',
    'mish',
    'mp_lamb_update_phase1',
    'mp_lamb_update_phase2',
    'mp_nag_mom_update',
    'mp_sgd_mom_update',
    'mp_sgd_update',
    'multi_all_finite',
    'multi_lars',
    'multi_mp_sgd_mom_update',
    'multi_mp_sgd_update',
    'multi_sgd_mom_update',
    'multi_sgd_update',
    'multi_sum_sq',
    'nag_mom_update',
    'negative',
    'one_hot',
    'ones_like',
    'pick',
    'preloaded_multi_mp_sgd_mom_update',
    'preloaded_multi_mp_sgd_update',
    'preloaded_multi_sgd_mom_update',
    'preloaded_multi_sgd_update',
    'radians',
    'relu',
    'repeat',
    'reset_arrays',
    'reshape_like',
    'reverse',
    'rint',
    'rmsprop_update',
    'rmspropalex_update',
    'round',
    'scatter_nd',
    'sgd_mom_update',
    'sgd_update',
    'shape_array',
    'sigmoid',
    'sign',
    'signsgd_update',
    'signum_update',
    'sin',
    'size_array',
    'slice',
    'slice_axis',
    'slice_like',
    'softsign',
    'sort',
    'space_to_depth',
    'sqrt',
    'squeeze',
    'take',
    'tanh',
    'tile',
    'transpose',
    'trunc',
    'zeros_like',
]
# Functions that have to be cast to FP32 due to possible
# overflows
# (e.g. exponentials, powers, reductions and softmax-style ops whose
# intermediate values can exceed fp16 range).
FP32_FUNCS = [
    'IdentityAttachKLSparseReg',
    'arccos',
    'arcsin',
    'cosh',
    'erfinv',
    'sinh',
    'tan',
    'arctanh',
    '_contrib_calibrate_entropy',
    '_contrib_MultiBoxDetection',
    '_contrib_MultiBoxPrior',
    '_contrib_MultiBoxTarget',
    '_npi_arccos',
    '_npi_arcsin',
    '_npi_arctanh',
    '_npi_cosh',
    '_npi_sinh',
    '_npi_tan',
    # Exponents
    '_npi_exp',
    '_npi_expm1',
    '_npi_ldexp',
    '_npi_ldexp_scalar',
    '_npi_logaddexp',
    '_npi_logaddexp_scalar',
    '_npi_log',
    '_npi_log10',
    '_npi_log1p',
    '_npi_log2',
    '_npi_rldexp_scalar',
    'exp',
    'expm1',
    'log',
    'log10',
    'log2',
    'log1p',
    # Powers
    'broadcast_power',
    'square',
    'reciprocal',
    '_rdiv_scalar',
    'rsqrt',
    'rcbrt',
    '_power',
    '_power_scalar',
    '_rpower_scalar',
    '_square_sum',
    '_contrib_hawkesll',
    '_npi_power',
    '_npi_power_scalar',
    '_npi_reciprocal',
    '_npi_rpower_scalar',
    '_npi_square',
    # Reductions
    '_npi_average',
    '_npi_cumsum',
    '_npi_mean',
    '_npi_polyval',
    '_npi_prod',
    '_npi_std',
    '_npi_sum',
    '_npi_trace',
    '_npi_var',
    'sum',
    'nansum',
    'prod',
    'nanprod',
    'mean',
    'norm',
    'softmin',
    'khatri_rao',
    'moments',
    # Misc
    '_npi_cholesky',
    '_npi_eig',
    '_npi_eigh',
    '_npi_eigvals',
    '_npi_eigvalsh',
    '_npi_lstsq',
    '_npi_matrix_rank',
    '_npi_matrix_rank_none_tol',
    '_npi_norm',
    '_npi_pinv',
    '_npi_pinv_scalar_rcond',
    '_npi_qr',
    '_npi_solve',
    '_npi_svd',
    '_npi_tensorinv',
    '_npi_tensorsolve',
    'digamma',
    'gamma',
    'gammaln',
    '_linalg_gelqf',
    '_linalg_potrf',
    '_linalg_potri',
    '_linalg_sumlogdiag',
    '_linalg_syevd',
    '_linalg_syrk',
    '_linalg_trmm',
    '_linalg_trsm',
    '_linalg_makediag',
    '_linalg_extractdiag',
    '_linalg_maketrian',
    '_linalg_extracttrian',
    '_linalg_inverse',
    '_linalg_det',
    '_linalg_slogdet',
    '_NDArray',
    '_Native',
    '_contrib_count_sketch',
    '_contrib_SyncBatchNorm',
    '_contrib_fft',
    'argsort',
    'topk',
    # Neural network
    'SoftmaxOutput',
    'softmax',
    'log_softmax',
    'masked_softmax',
    'masked_log_softmax',
    'InstanceNorm',
    'LayerNorm',
    'GroupNorm',
    'L2Normalization',
    'LRN',
    'SoftmaxActivation',
    'LinearRegressionOutput',
    'LogisticRegressionOutput',
    'MAERegressionOutput',
    'SVMOutput',
    'softmax_cross_entropy',
    'smooth_l1',
    'MakeLoss',
    'make_loss',
    'Custom',
    'CTCLoss',
    '_npx_deformable_convolution',
    '_npx_modulated_deformable_convolution',
    '_contrib_DeformablePSROIPooling',
    '_contrib_sldwin_atten_score',
    '_contrib_sldwin_atten_mask_like',
    '_contrib_sldwin_atten_context',
]
# ONEDNN subgraph ('_sg_onednn_*') ops are only registered when the ONEDNN
# feature is compiled in, so they are appended to the fp32 list conditionally.
if Features().is_enabled('ONEDNN'):
    FP32_FUNCS.extend([
        '_sg_onednn_conv',
        '_sg_onednn_fully_connected',
        '_sg_onednn_selfatt_qk',
        '_sg_onednn_selfatt_valatt',
        '_sg_onednn_batch_dot'
    ])
# Functions that have to be cast to FP32 only for
# some values of their parameters
# Each entry is a (op_name, parameter_name, parameter_values) triple: the op
# is forced to fp32 only when the named parameter takes one of those values.
CONDITIONAL_FP32_FUNCS = [
    ('Activation', 'act_type', ['softrelu']),
    ('LeakyReLU', 'act_type', ['elu', 'selu']),
]
# Functions with multiple inputs, that need the same
# type of all their inputs
# (mixed fp16/fp32 inputs get cast to the widest type before the call).
WIDEST_TYPE_CASTS = [
    '_equal',
    '_greater',
    '_greater_equal',
    '_hypot',
    '_lesser',
    '_lesser_equal',
    '_logical_and',
    '_logical_or',
    '_logical_xor',
    '_maximum',
    '_minimum',
    '_mod',
    '_not_equal',
    '_npi_column_stack',
    '_npi_copysign',
    '_npi_cross',
    '_npi_dot',
    '_npi_ediff1d',
    '_npi_equal',
    '_npi_fmax',
    '_npi_fmin',
    '_npi_fmod',
    '_npi_greater',
    '_npi_greater_equal',
    '_npi_hypot',
    '_npi_kron',
    '_npi_less',
    '_npi_less_equal',
    '_npi_logical_and',
    '_npi_logical_or',
    '_npi_logical_xor',
    '_npi_not_equal',
    '_npi_dstack',
    '_npi_hstack',
    '_npi_tensordot',
    '_npi_tensordot_int_axes',
    '_npi_vstack',
    '_npi_where',
    '_npx_index_add',
    '_npx_index_update',
    'Concat',
    '_contrib_RROIAlign',
    'Correlation',
    'add_n',
    'batch_dot',
    'broadcast_add',
    'broadcast_div',
    'broadcast_equal',
    'broadcast_greater',
    'broadcast_greater_equal',
    'broadcast_hypot',
    'broadcast_lesser',
    'broadcast_lesser_equal',
    'broadcast_logical_and',
    'broadcast_logical_or',
    'broadcast_logical_xor',
    'broadcast_maximum',
    'broadcast_minimum',
    'broadcast_mod',
    'broadcast_mul',
    'broadcast_not_equal',
    'broadcast_sub',
    'dot',
    'elemwise_add',
    'elemwise_div',
    'elemwise_mul',
    'elemwise_sub',
    'stack',
    '_contrib_MultiProposal',
    '_contrib_PSROIPooling',
    '_contrib_Proposal',
    '_contrib_ROIAlign',
    '_contrib_box_decode',
    '_contrib_box_encode',
    '_contrib_box_iou',
    '_contrib_box_nms',
    '_contrib_dgl_adjacency',
    '_contrib_dgl_csr_neighbor_non_uniform_sample',
    '_contrib_dgl_csr_neighbor_uniform_sample',
    '_contrib_dgl_graph_compact',
    '_contrib_dgl_subgraph',
    '_contrib_edge_id',
    '_contrib_interleaved_matmul_encdec_qk',
    '_contrib_interleaved_matmul_encdec_valatt',
    '_contrib_interleaved_matmul_selfatt_qk',
    '_contrib_interleaved_matmul_selfatt_valatt',
    'where',
    '_random_pdf_gamma',
    '_random_pdf_exponential',
    '_random_pdf_uniform',
    '_random_pdf_negative_binomial',
    '_random_pdf_generalized_negative_binomial',
    '_random_pdf_dirichlet',
    '_random_pdf_normal',
    '_random_pdf_poisson',
]
# Output/loss layers; a subset of FP32_FUNCS singled out for special handling.
# NOTE(review): the exact consumer of this list is outside this file — confirm
# how AMP treats loss outputs before extending it.
LOSS_OUTPUT_FUNCTIONS = [
    'SoftmaxOutput',
    'LinearRegressionOutput',
    'LogisticRegressionOutput',
    'MAERegressionOutput',
]
| |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import imaplib
import unittest
from mock import Mock, patch, mock_open
from airflow import AirflowException
from airflow.contrib.hooks.imap_hook import ImapHook
from airflow.models import Connection
from airflow.utils import db
# Dotted patch targets for the tests below: patch the names as seen *inside*
# the imap_hook module so the mocks only affect the hook under test.
imaplib_string = 'airflow.contrib.hooks.imap_hook.imaplib'
open_string = 'airflow.contrib.hooks.imap_hook.open'
def _create_fake_imap(mock_imaplib, with_mail=False, attachment_name='test1.csv'):
mock_conn = Mock(spec=imaplib.IMAP4_SSL)
mock_imaplib.IMAP4_SSL.return_value = mock_conn
mock_conn.login.return_value = ('OK', [])
if with_mail:
mock_conn.select.return_value = ('OK', [])
mock_conn.search.return_value = ('OK', [b'1'])
mail_string = \
'Content-Type: multipart/mixed; boundary=123\r\n--123\r\n' \
'Content-Disposition: attachment; filename="{}";' \
'Content-Transfer-Encoding: base64\r\nSWQsTmFtZQoxLEZlbGl4\r\n--123--'.format(attachment_name)
mock_conn.fetch.return_value = ('OK', [(b'', mail_string.encode('utf-8'))])
mock_conn.close.return_value = ('OK', [])
mock_conn.logout.return_value = ('OK', [])
return mock_conn
class TestImapHook(unittest.TestCase):
    """Tests for ImapHook built entirely on mocks from _create_fake_imap.

    No real IMAP server is contacted; ``imaplib`` (and, where needed,
    ``open``/``os.path.islink``) are patched inside the hook's module.
    Note: ``@patch`` decorators apply bottom-up, so the bottom-most decorator
    supplies the first mock argument after ``self``.
    """
    def setUp(self):
        # Register the connection record the hook resolves via conn_id
        # 'imap_default'.
        db.merge_conn(
            Connection(
                conn_id='imap_default',
                host='imap_server_address',
                login='imap_user',
                password='imap_password'
            )
        )
    @patch(imaplib_string)
    def test_connect_and_disconnect(self, mock_imaplib):
        mock_conn = _create_fake_imap(mock_imaplib)
        # Entering/leaving the context manager should log in and log out.
        with ImapHook():
            pass
        mock_imaplib.IMAP4_SSL.assert_called_once_with('imap_server_address')
        mock_conn.login.assert_called_once_with('imap_user', 'imap_password')
        assert mock_conn.logout.call_count == 1
    @patch(imaplib_string)
    def test_has_mail_attachments_found(self, mock_imaplib):
        _create_fake_imap(mock_imaplib, with_mail=True)
        with ImapHook() as imap_hook:
            has_attachment_in_inbox = imap_hook.has_mail_attachment('test1.csv')
        self.assertTrue(has_attachment_in_inbox)
    @patch(imaplib_string)
    def test_has_mail_attachments_not_found(self, mock_imaplib):
        _create_fake_imap(mock_imaplib, with_mail=True)
        with ImapHook() as imap_hook:
            has_attachment_in_inbox = imap_hook.has_mail_attachment('test1.txt')
        self.assertFalse(has_attachment_in_inbox)
    @patch(imaplib_string)
    def test_has_mail_attachments_with_regex_found(self, mock_imaplib):
        _create_fake_imap(mock_imaplib, with_mail=True)
        with ImapHook() as imap_hook:
            has_attachment_in_inbox = imap_hook.has_mail_attachment(
                name=r'test(\d+).csv',
                check_regex=True
            )
        self.assertTrue(has_attachment_in_inbox)
    @patch(imaplib_string)
    def test_has_mail_attachments_with_regex_not_found(self, mock_imaplib):
        _create_fake_imap(mock_imaplib, with_mail=True)
        with ImapHook() as imap_hook:
            has_attachment_in_inbox = imap_hook.has_mail_attachment(
                name=r'test_(\d+).csv',
                check_regex=True
            )
        self.assertFalse(has_attachment_in_inbox)
    @patch(imaplib_string)
    def test_retrieve_mail_attachments_found(self, mock_imaplib):
        _create_fake_imap(mock_imaplib, with_mail=True)
        with ImapHook() as imap_hook:
            attachments_in_inbox = imap_hook.retrieve_mail_attachments('test1.csv')
        self.assertEqual(attachments_in_inbox, [('test1.csv', b'SWQsTmFtZQoxLEZlbGl4')])
    @patch(imaplib_string)
    def test_retrieve_mail_attachments_not_found(self, mock_imaplib):
        _create_fake_imap(mock_imaplib, with_mail=True)
        with ImapHook() as imap_hook:
            self.assertRaises(AirflowException, imap_hook.retrieve_mail_attachments, 'test1.txt')
    @patch(imaplib_string)
    def test_retrieve_mail_attachments_with_regex_found(self, mock_imaplib):
        _create_fake_imap(mock_imaplib, with_mail=True)
        with ImapHook() as imap_hook:
            attachments_in_inbox = imap_hook.retrieve_mail_attachments(
                name=r'test(\d+).csv',
                check_regex=True
            )
        self.assertEqual(attachments_in_inbox, [('test1.csv', b'SWQsTmFtZQoxLEZlbGl4')])
    @patch(imaplib_string)
    def test_retrieve_mail_attachments_with_regex_not_found(self, mock_imaplib):
        _create_fake_imap(mock_imaplib, with_mail=True)
        with ImapHook() as imap_hook:
            self.assertRaises(AirflowException,
                              imap_hook.retrieve_mail_attachments,
                              name=r'test_(\d+).csv',
                              check_regex=True)
    @patch(imaplib_string)
    def test_retrieve_mail_attachments_latest_only(self, mock_imaplib):
        _create_fake_imap(mock_imaplib, with_mail=True)
        with ImapHook() as imap_hook:
            attachments_in_inbox = imap_hook.retrieve_mail_attachments(
                name='test1.csv',
                latest_only=True
            )
        self.assertEqual(attachments_in_inbox, [('test1.csv', b'SWQsTmFtZQoxLEZlbGl4')])
    @patch(open_string, new_callable=mock_open)
    @patch(imaplib_string)
    def test_download_mail_attachments_found(self, mock_imaplib, mock_open_method):
        _create_fake_imap(mock_imaplib, with_mail=True)
        with ImapHook() as imap_hook:
            imap_hook.download_mail_attachments('test1.csv', 'test_directory')
        mock_open_method.assert_called_once_with('test_directory/test1.csv', 'wb')
        mock_open_method.return_value.write.assert_called_once_with(b'SWQsTmFtZQoxLEZlbGl4')
    @patch(open_string, new_callable=mock_open)
    @patch(imaplib_string)
    def test_download_mail_attachments_not_found(self, mock_imaplib, mock_open_method):
        _create_fake_imap(mock_imaplib, with_mail=True)
        with ImapHook() as imap_hook:
            self.assertRaises(AirflowException,
                              imap_hook.download_mail_attachments, 'test1.txt', 'test_directory')
        mock_open_method.assert_not_called()
        mock_open_method.return_value.write.assert_not_called()
    @patch(open_string, new_callable=mock_open)
    @patch(imaplib_string)
    def test_download_mail_attachments_with_regex_found(self, mock_imaplib, mock_open_method):
        _create_fake_imap(mock_imaplib, with_mail=True)
        with ImapHook() as imap_hook:
            imap_hook.download_mail_attachments(
                name=r'test(\d+).csv',
                local_output_directory='test_directory',
                check_regex=True
            )
        mock_open_method.assert_called_once_with('test_directory/test1.csv', 'wb')
        mock_open_method.return_value.write.assert_called_once_with(b'SWQsTmFtZQoxLEZlbGl4')
    @patch(open_string, new_callable=mock_open)
    @patch(imaplib_string)
    def test_download_mail_attachments_with_regex_not_found(self, mock_imaplib, mock_open_method):
        _create_fake_imap(mock_imaplib, with_mail=True)
        with ImapHook() as imap_hook:
            self.assertRaises(AirflowException,
                              imap_hook.download_mail_attachments,
                              name=r'test_(\d+).csv',
                              local_output_directory='test_directory',
                              check_regex=True)
        mock_open_method.assert_not_called()
        mock_open_method.return_value.write.assert_not_called()
    @patch(open_string, new_callable=mock_open)
    @patch(imaplib_string)
    def test_download_mail_attachments_with_latest_only(self, mock_imaplib, mock_open_method):
        _create_fake_imap(mock_imaplib, with_mail=True)
        with ImapHook() as imap_hook:
            imap_hook.download_mail_attachments(
                name='test1.csv',
                local_output_directory='test_directory',
                latest_only=True
            )
        mock_open_method.assert_called_once_with('test_directory/test1.csv', 'wb')
        mock_open_method.return_value.write.assert_called_once_with(b'SWQsTmFtZQoxLEZlbGl4')
    @patch(open_string, new_callable=mock_open)
    @patch(imaplib_string)
    def test_download_mail_attachments_with_escaping_chars(self, mock_imaplib, mock_open_method):
        # Path-traversal attachment names ('../') must never be written out.
        _create_fake_imap(mock_imaplib, with_mail=True, attachment_name='../test1.csv')
        with ImapHook() as imap_hook:
            imap_hook.download_mail_attachments(
                name='../test1.csv',
                local_output_directory='test_directory'
            )
        mock_open_method.assert_not_called()
        mock_open_method.return_value.write.assert_not_called()
    @patch('airflow.contrib.hooks.imap_hook.os.path.islink', return_value=True)
    @patch(open_string, new_callable=mock_open)
    @patch(imaplib_string)
    def test_download_mail_attachments_with_symlink(self, mock_imaplib, mock_open_method, mock_is_symlink):
        # Symlinked destinations must be detected and skipped, not overwritten.
        _create_fake_imap(mock_imaplib, with_mail=True, attachment_name='symlink')
        with ImapHook() as imap_hook:
            imap_hook.download_mail_attachments(
                name='symlink',
                local_output_directory='test_directory'
            )
        assert mock_is_symlink.call_count == 1
        mock_open_method.assert_not_called()
        mock_open_method.return_value.write.assert_not_called()
# Allow running this test module directly with ``python``.
if __name__ == '__main__':
    unittest.main()
| |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Originally copied from python-glanceclient
import copy
import hashlib
import httplib
import json
import posixpath
import re
import socket
import StringIO
import struct
import urlparse
import OpenSSL
from oslo_log import log as logging
from six import moves
from tempest_lib import exceptions as lib_exc
from tempest import exceptions as exc
# Module-level logger.
LOG = logging.getLogger(__name__)
# Sent as the User-Agent header on every request (see _http_request).
USER_AGENT = 'tempest'
CHUNKSIZE = 1024 * 64  # 64kB
# NOTE(review): presumably used to validate/sanitize auth-token strings
# (base64 alphabet plus '-'); the usage site is outside this view — confirm.
TOKEN_CHARS_RE = re.compile('^[-A-Za-z0-9+/=]*$')
class HTTPClient(object):
    """Minimal HTTP(S) client for the image service API.

    Resolves the service endpoint through *auth_provider*, opens a new
    httplib connection per request, and supports chunked transfer encoding
    for streaming request bodies.
    """

    def __init__(self, auth_provider, filters, **kwargs):
        self.auth_provider = auth_provider
        self.filters = filters
        self.endpoint = auth_provider.base_url(filters)
        endpoint_parts = urlparse.urlparse(self.endpoint)
        self.endpoint_scheme = endpoint_parts.scheme
        self.endpoint_hostname = endpoint_parts.hostname
        self.endpoint_port = endpoint_parts.port
        self.endpoint_path = endpoint_parts.path
        self.connection_class = self.get_connection_class(self.endpoint_scheme)
        self.connection_kwargs = self.get_connection_kwargs(
            self.endpoint_scheme, **kwargs)

    @staticmethod
    def get_connection_class(scheme):
        """Return the connection class appropriate for *scheme*."""
        if scheme == 'https':
            return VerifiedHTTPSConnection
        else:
            return httplib.HTTPConnection

    @staticmethod
    def get_connection_kwargs(scheme, **kwargs):
        """Build the keyword arguments for the connection class.

        SSL-related options are only forwarded for https endpoints.
        """
        _kwargs = {'timeout': float(kwargs.get('timeout', 600))}
        if scheme == 'https':
            _kwargs['ca_certs'] = kwargs.get('ca_certs', None)
            _kwargs['cert_file'] = kwargs.get('cert_file', None)
            _kwargs['key_file'] = kwargs.get('key_file', None)
            _kwargs['insecure'] = kwargs.get('insecure', False)
            _kwargs['ssl_compression'] = kwargs.get('ssl_compression', True)
        return _kwargs

    def get_connection(self):
        """Open a new connection to the endpoint.

        :raises exc.EndpointNotFound: if the endpoint URL is invalid.
        """
        _class = self.connection_class
        try:
            return _class(self.endpoint_hostname, self.endpoint_port,
                          **self.connection_kwargs)
        except httplib.InvalidURL:
            raise exc.EndpointNotFound

    def _http_request(self, url, method, **kwargs):
        """Send an http request with the specified characteristics.

        Wrapper around httplib.HTTP(S)Connection.request to handle tasks such
        as setting headers and error handling.

        :returns: (response, body) where body is either a StringIO with the
            whole body (non-binary responses) or a ResponseBodyIterator
            (application/octet-stream responses).
        :raises exc.EndpointNotFound: on DNS resolution failure.
        :raises exc.TimeoutException: on socket errors/timeouts.
        """
        # Copy the kwargs so we can reuse the original in case of redirects
        kwargs['headers'] = copy.deepcopy(kwargs.get('headers', {}))
        kwargs['headers'].setdefault('User-Agent', USER_AGENT)
        self._log_request(method, url, kwargs['headers'])
        conn = self.get_connection()
        try:
            url_parts = urlparse.urlparse(url)
            conn_url = posixpath.normpath(url_parts.path)
            LOG.debug('Actual Path: {path}'.format(path=conn_url))
            if kwargs['headers'].get('Transfer-Encoding') == 'chunked':
                # Stream the body out in hex-length-prefixed chunks, ending
                # with the zero-length terminator chunk.
                conn.putrequest(method, conn_url)
                for header, value in kwargs['headers'].items():
                    conn.putheader(header, value)
                conn.endheaders()
                chunk = kwargs['body'].read(CHUNKSIZE)
                # Chunk it, baby...
                while chunk:
                    conn.send('%x\r\n%s\r\n' % (len(chunk), chunk))
                    chunk = kwargs['body'].read(CHUNKSIZE)
                conn.send('0\r\n\r\n')
            else:
                conn.request(method, conn_url, **kwargs)
            resp = conn.getresponse()
        except socket.gaierror as e:
            message = ("Error finding address for %(url)s: %(e)s" %
                       {'url': url, 'e': e})
            raise exc.EndpointNotFound(message)
        except (socket.error, socket.timeout) as e:
            message = ("Error communicating with %(endpoint)s %(e)s" %
                       {'endpoint': self.endpoint, 'e': e})
            raise exc.TimeoutException(message)

        body_iter = ResponseBodyIterator(resp)
        # Read body into string if it isn't obviously image data
        if resp.getheader('content-type', None) != 'application/octet-stream':
            body_str = ''.join([body_chunk for body_chunk in body_iter])
            body_iter = StringIO.StringIO(body_str)
            self._log_response(resp, None)
        else:
            self._log_response(resp, body_iter)
        return resp, body_iter

    def _log_request(self, method, url, headers):
        LOG.info('Request: ' + method + ' ' + url)
        if headers:
            headers_out = headers
            if 'X-Auth-Token' in headers and headers['X-Auth-Token']:
                token = headers['X-Auth-Token']
                # Only redact real-looking tokens; short values may be
                # deliberate test data worth keeping visible in logs.
                if len(token) > 64 and TOKEN_CHARS_RE.match(token):
                    headers_out = headers.copy()
                    headers_out['X-Auth-Token'] = "<Token omitted>"
            LOG.info('Request Headers: ' + str(headers_out))

    def _log_response(self, resp, body):
        status = str(resp.status)
        LOG.info("Response Status: " + status)
        if resp.getheaders():
            LOG.info('Response Headers: ' + str(resp.getheaders()))
        if body:
            str_body = str(body)
            # BUG FIX: was len(body), which raises TypeError when body is a
            # ResponseBodyIterator (no __len__) in the octet-stream branch.
            length = len(str_body)
            LOG.info('Response Body: ' + str_body[:2048])
            if length >= 2048:
                # BUG FIX: was 'self.LOG.debug' — LOG is a module-level
                # logger, so self.LOG raised AttributeError for large bodies.
                LOG.debug("Large body (%d) md5 summary: %s", length,
                          hashlib.md5(str_body).hexdigest())

    def json_request(self, method, url, **kwargs):
        """Issue a request whose request/response bodies are JSON.

        :raises lib_exc.InvalidContentType: for non-JSON content types.
        """
        kwargs.setdefault('headers', {})
        kwargs['headers'].setdefault('Content-Type', 'application/json')
        if kwargs['headers']['Content-Type'] != 'application/json':
            msg = "Only application/json content-type is supported."
            raise lib_exc.InvalidContentType(msg)

        if 'body' in kwargs:
            kwargs['body'] = json.dumps(kwargs['body'])

        resp, body_iter = self._http_request(url, method, **kwargs)

        if 'application/json' in resp.getheader('content-type', ''):
            body = ''.join([chunk for chunk in body_iter])
            try:
                body = json.loads(body)
            except ValueError:
                LOG.error('Could not decode response body as JSON')
        else:
            # BUG FIX: message previously read "json/application", the
            # reverse of the content type actually checked above.
            msg = "Only application/json content-type is supported."
            raise lib_exc.InvalidContentType(msg)

        return resp, body

    def raw_request(self, method, url, **kwargs):
        """Issue a request with a raw (octet-stream) body.

        File-like bodies on POST/PUT are streamed with chunked encoding
        since their size may not be known in advance.
        """
        kwargs.setdefault('headers', {})
        kwargs['headers'].setdefault('Content-Type',
                                     'application/octet-stream')
        if 'body' in kwargs:
            if (hasattr(kwargs['body'], 'read')
                    and method.lower() in ('post', 'put')):
                # We use 'Transfer-Encoding: chunked' because
                # body size may not always be known in advance.
                kwargs['headers']['Transfer-Encoding'] = 'chunked'

        # Decorate the request with auth
        req_url, kwargs['headers'], kwargs['body'] = \
            self.auth_provider.auth_request(
                method=method, url=url, headers=kwargs['headers'],
                body=kwargs.get('body', None), filters=self.filters)
        return self._http_request(req_url, method, **kwargs)
class OpenSSLConnectionDelegator(object):
    """
    An OpenSSL.SSL.Connection delegator.
    Supplies an additional 'makefile' method which httplib requires
    and is not present in OpenSSL.SSL.Connection.
    Note: Since it is not possible to inherit from OpenSSL.SSL.Connection
    a delegator must be used.
    """
    def __init__(self, *args, **kwargs):
        # All constructor arguments are forwarded verbatim to the wrapped
        # OpenSSL.SSL.Connection.
        self.connection = OpenSSL.SSL.Connection(*args, **kwargs)

    def __getattr__(self, name):
        # Delegate every attribute we don't define to the wrapped connection.
        return getattr(self.connection, name)

    def makefile(self, *args, **kwargs):
        # Ensure the socket is closed when this file is closed
        kwargs['close'] = True
        # NOTE(review): socket._fileobject is a private Python 2 API and does
        # not exist on Python 3 — this module is Python 2 only as written.
        return socket._fileobject(self.connection, *args, **kwargs)
class VerifiedHTTPSConnection(httplib.HTTPSConnection):
    """
    Extended HTTPSConnection which uses the OpenSSL library
    for enhanced SSL support.
    Note: Much of this functionality can eventually be replaced
    with native Python 3.3 code.
    """

    def __init__(self, host, port=None, key_file=None, cert_file=None,
                 ca_certs=None, timeout=None, insecure=False,
                 ssl_compression=True):
        httplib.HTTPSConnection.__init__(self, host, port,
                                         key_file=key_file,
                                         cert_file=cert_file)
        self.key_file = key_file
        self.cert_file = cert_file
        self.timeout = timeout
        self.insecure = insecure
        self.ssl_compression = ssl_compression
        self.ca_certs = ca_certs
        self.setcontext()

    @staticmethod
    def host_matches_cert(host, x509):
        """
        Verify that the x509 certificate we have received
        from 'host' correctly identifies the server we are
        connecting to, ie that the certificate's Common Name
        or a Subject Alternative Name matches 'host'.

        :raises exc.SSLCertificateError: when neither matches.
        """
        # First see if we can match the CN
        if x509.get_subject().commonName == host:
            return True

        # Also try Subject Alternative Names for a match
        san_list = None
        for i in moves.xrange(x509.get_extension_count()):
            ext = x509.get_extension(i)
            if ext.get_short_name() == 'subjectAltName':
                san_list = str(ext)
                for san in ''.join(san_list.split()).split(','):
                    if san == "DNS:%s" % host:
                        return True

        # Server certificate does not match host
        msg = ('Host "%s" does not match x509 certificate contents: '
               'CommonName "%s"' % (host, x509.get_subject().commonName))
        if san_list is not None:
            msg = msg + ', subjectAltName "%s"' % san_list
        raise exc.SSLCertificateError(msg)

    def verify_callback(self, connection, x509, errnum,
                        depth, preverify_ok):
        """OpenSSL verification callback: reject expired certificates and,
        for the leaf certificate (depth 0), require a hostname match.
        """
        if x509.has_expired():
            msg = "SSL Certificate expired on '%s'" % x509.get_notAfter()
            raise exc.SSLCertificateError(msg)

        if depth == 0 and preverify_ok is True:
            # We verify that the host matches against the last
            # certificate in the chain
            return self.host_matches_cert(self.host, x509)
        else:
            # Pass through OpenSSL's default result
            return preverify_ok

    def setcontext(self):
        """
        Set up the OpenSSL context.

        :raises exc.SSLConfigurationError: when any cert/key/CA file
            cannot be loaded.
        """
        self.context = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)

        if self.ssl_compression is False:
            self.context.set_options(0x20000)  # SSL_OP_NO_COMPRESSION

        if self.insecure is not True:
            self.context.set_verify(OpenSSL.SSL.VERIFY_PEER,
                                    self.verify_callback)
        else:
            self.context.set_verify(OpenSSL.SSL.VERIFY_NONE,
                                    self.verify_callback)

        if self.cert_file:
            try:
                self.context.use_certificate_file(self.cert_file)
            except Exception as e:
                msg = 'Unable to load cert from "%s" %s' % (self.cert_file, e)
                raise exc.SSLConfigurationError(msg)
            if self.key_file is None:
                # We support having key and cert in same file
                try:
                    self.context.use_privatekey_file(self.cert_file)
                except Exception as e:
                    msg = ('No key file specified and unable to load key '
                           'from "%s" %s' % (self.cert_file, e))
                    raise exc.SSLConfigurationError(msg)

        if self.key_file:
            try:
                self.context.use_privatekey_file(self.key_file)
            except Exception as e:
                msg = 'Unable to load key from "%s" %s' % (self.key_file, e)
                raise exc.SSLConfigurationError(msg)

        if self.ca_certs:
            try:
                self.context.load_verify_locations(self.ca_certs)
            except Exception as e:
                # BUG FIX: the format string had a single '%s' but was given
                # a 2-tuple, raising TypeError instead of the intended
                # SSLConfigurationError with a useful message.
                msg = 'Unable to load CA from "%s" %s' % (self.ca_certs, e)
                raise exc.SSLConfigurationError(msg)
        else:
            self.context.set_default_verify_paths()

    def connect(self):
        """
        Connect to an SSL port using the OpenSSL library and apply
        per-connection parameters.
        """
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if self.timeout is not None:
            # '0' microseconds
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO,
                            struct.pack('LL', self.timeout, 0))
        self.sock = OpenSSLConnectionDelegator(self.context, sock)
        self.sock.connect((self.host, self.port))

    def close(self):
        if self.sock:
            # Remove the reference to the socket but don't close it yet.
            # Response close will close both socket and associated
            # file. Closing socket too soon will cause response
            # reads to fail with socket IO error 'Bad file descriptor'.
            self.sock = None
        httplib.HTTPSConnection.close(self)
class ResponseBodyIterator(object):
    """A class that acts as an iterator over an HTTP response body,
    reading it in CHUNKSIZE pieces.
    """

    def __init__(self, resp):
        self.resp = resp

    def __iter__(self):
        # BUG FIX: the previous implementation yielded self.next() in an
        # infinite loop and relied on its StopIteration escaping the
        # generator; under PEP 479 (Python 3.7+) that raises RuntimeError.
        # Read and break explicitly instead — identical behavior otherwise.
        while True:
            chunk = self.resp.read(CHUNKSIZE)
            if not chunk:
                break
            yield chunk

    def next(self):
        """Return the next chunk, raising StopIteration when exhausted."""
        chunk = self.resp.read(CHUNKSIZE)
        if chunk:
            return chunk
        else:
            raise StopIteration()

    # Python 3 iterator-protocol compatibility alias.
    __next__ = next
| |
import ast
import token
import tokenize
from os.path import islink
from StringIO import StringIO
from itertools import izip
from dxr.build import unignored
from dxr.filters import FILE, LINE
from dxr.indexers import (Extent, FileToIndex as FileToIndexBase,
iterable_per_line, Position, split_into_lines,
TreeToIndex as TreeToIndexBase,
QUALIFIED_FILE_NEEDLE, QUALIFIED_LINE_NEEDLE,
with_start_and_end)
from dxr.lines import Ref
from dxr.plugins.python.analysis import TreeAnalysis
from dxr.plugins.python.menus import ClassRef
from dxr.plugins.python.utils import (ClassFunctionVisitorMixin,
convert_node_to_name, local_name,
path_to_module, ast_parse)
# Elasticsearch mapping contributed by the Python plugin: one needle type at
# the file level (module name) and several at the line level (types,
# functions, class hierarchy, call sites and override relationships).
mappings = {
    FILE: {
        'properties': {
            'py_module': QUALIFIED_FILE_NEEDLE,
        },
    },
    LINE: {
        'properties': {
            'py_type': QUALIFIED_LINE_NEEDLE,
            'py_function': QUALIFIED_LINE_NEEDLE,
            'py_derived': QUALIFIED_LINE_NEEDLE,
            'py_bases': QUALIFIED_LINE_NEEDLE,
            'py_callers': QUALIFIED_LINE_NEEDLE,
            'py_overrides': QUALIFIED_LINE_NEEDLE,
            'py_overridden': QUALIFIED_LINE_NEEDLE,
        },
    },
}
class _FileToIgnore(object):
    """Sentinel standing in for a file we don't want to bother indexing,
    usually because it has syntax errors.
    """
    def is_interesting(self):
        """Tell the indexer this file is never worth indexing."""
        return False


# Shared singleton instance; returned wherever a skipped file is needed.
FILE_TO_IGNORE = _FileToIgnore()
class TreeToIndex(TreeToIndexBase):
    @property
    def unignored_files(self):
        # Paths under the source folder that are not excluded by the tree's
        # ignore configuration.
        return unignored(self.tree.source_folder, self.tree.ignore_paths,
                         self.tree.ignore_filenames)

    def post_build(self):
        """Run the whole-tree Python analysis once the build has finished."""
        # Lazily pair each interesting path with the tree's source encoding;
        # the generator is consumed by TreeAnalysis.
        paths = ((path, self.tree.source_encoding)
                 for path in self.unignored_files if is_interesting(path))
        self.tree_analysis = TreeAnalysis(
            python_path=self.plugin_config.python_path,
            source_folder=self.tree.source_folder,
            paths=paths)

    def file_to_index(self, path, contents):
        """Return the indexer for *path*, or the FILE_TO_IGNORE sentinel for
        files the analysis decided to skip.
        """
        if path in self.tree_analysis.ignore_paths:
            return FILE_TO_IGNORE
        else:
            return FileToIndex(path, contents, self.plugin_name, self.tree,
                               tree_analysis=self.tree_analysis)
class IndexingNodeVisitor(ast.NodeVisitor, ClassFunctionVisitorMixin):
    """Node visitor that walks through the nodes in an abstract syntax
    tree and finds interesting things to index.
    """

    def __init__(self, file_to_index, tree_analysis):
        super(IndexingNodeVisitor, self).__init__()
        self.file_to_index = file_to_index
        self.tree_analysis = tree_analysis
        # Accumulated needle tuples and ref triples, read by FileToIndex
        # after the visit completes.
        self.needles = []
        self.refs = []

    def visit_FunctionDef(self, node):
        # Index the function itself for the function: filter.
        start, end = self.file_to_index.get_node_start_end(node)
        if start is not None:
            self.yield_needle('py_function', node.name, start, end)
        super(IndexingNodeVisitor, self).visit_FunctionDef(node)

    def visit_Call(self, node):
        # Index function/method call sites
        name = convert_node_to_name(node.func)
        if name:
            start, end = self.file_to_index.get_node_start_end(node)
            if start is not None:
                self.yield_needle('py_callers', name, start, end)
        self.generic_visit(node)

    def visit_ClassDef(self, node):
        # Index the class itself for the type: filter.
        start, end = self.file_to_index.get_node_start_end(node)
        if start is not None:
            self.yield_needle('py_type', node.name, start, end)

            # Index the class hierarchy for classes for the derived: and
            # bases: filters.
            class_name = self.get_class_name(node)
            bases = self.tree_analysis.get_base_classes(class_name,
                                                        set([class_name]))
            for qualname in bases:
                self.yield_needle(needle_type='py_derived',
                                  name=local_name(qualname), qualname=qualname,
                                  start=start, end=end)
            derived_classes = self.tree_analysis.get_derived_classes(
                class_name, set([class_name]))
            for qualname in derived_classes:
                self.yield_needle(needle_type='py_bases',
                                  name=local_name(qualname), qualname=qualname,
                                  start=start, end=end)

            # Show a menu when hovering over this class.
            self.yield_ref(start, end,
                           ClassRef(self.file_to_index.tree, class_name))
        super(IndexingNodeVisitor, self).visit_ClassDef(node)

    def visit_ClassFunction(self, class_node, function_node):
        class_name = self.get_class_name(class_node)
        function_qualname = class_name + '.' + function_node.name
        start, end = self.file_to_index.get_node_start_end(function_node)
        if start is None:
            return

        # Index this function as being overridden by other functions for
        # the overridden: filter.
        for qualname in self.tree_analysis.overridden_functions[function_qualname]:
            name = qualname.rsplit('.')[-1]
            self.yield_needle(needle_type='py_overridden',
                              name=name, qualname=qualname,
                              start=start, end=end)

        # Index this function as overriding other functions for the
        # overrides: filter.
        for qualname in self.tree_analysis.overriding_functions[function_qualname]:
            name = qualname.rsplit('.')[-1]
            self.yield_needle(needle_type='py_overrides',
                              name=name, qualname=qualname,
                              start=start, end=end)

    def get_class_name(self, class_node):
        # Fully qualified name: absolute module path + class name.
        return self.file_to_index.abs_module_name + '.' + class_node.name

    def yield_needle(self, *args, **kwargs):
        needle = line_needle(*args, **kwargs)
        self.needles.append(needle)

    def yield_ref(self, start, end, ref):
        # Convert (row, col) positions into absolute character offsets.
        self.refs.append((
            self.file_to_index.char_offset(*start),
            self.file_to_index.char_offset(*end),
            ref,
        ))
class FileToIndex(FileToIndexBase):
    """Indexer for a single Python file: walks its AST and exposes the
    needles and refs produced by the visitor.
    """

    def __init__(self, path, contents, plugin_name, tree, tree_analysis):
        """
        :arg tree_analysis: TreeAnalysisResult object with the results
        from the post-build analysis.
        """
        super(FileToIndex, self).__init__(path, contents, plugin_name, tree)
        self.tree_analysis = tree_analysis
        self.abs_module_name = path_to_module(tree_analysis.python_path, self.path)
        self._visitor = None

    def is_interesting(self):
        # Both the generic base-class check and the module-level path check
        # (Python file, not a symlink) must agree.
        return super(FileToIndex, self).is_interesting() and is_interesting(self.path)

    @property
    def visitor(self):
        """Return IndexingNodeVisitor for this file, lazily creating and
        running it if it doesn't exist yet.
        """
        if not self._visitor:
            # Token analysis must run first: the visitor relies on the
            # start/end tables built from the token stream.
            self.node_start_table, self.call_start_table = self.analyze_tokens()
            self._visitor = IndexingNodeVisitor(self, self.tree_analysis)
            syntax_tree = ast_parse(self.contents)
            self._visitor.visit(syntax_tree)
        return self._visitor

    def needles(self):
        # Index module name. For practical purposes, this includes
        # __init__.py files for packages even though that's not
        # _technically_ a module.
        yield file_needle('py_module',
                          name=local_name(self.abs_module_name),
                          qualname=self.abs_module_name)

    def needles_by_line(self):
        return iterable_per_line(
            with_start_and_end(
                split_into_lines(
                    self.visitor.needles
                )
            )
        )

    def refs(self):
        return self.visitor.refs

    def analyze_tokens(self):
        """Split the file into tokens and analyze them for data needed
        for indexing.
        """
        # Run the file contents through the tokenizer, both as unicode
        # and as a utf-8 encoded string. This will allow us to build
        # up a mapping between the byte offset and the character offset.
        token_gen = tokenize.generate_tokens(StringIO(self.contents).readline)
        utf8_token_gen = tokenize.generate_tokens(
            StringIO(self.contents.encode('utf-8')).readline)

        # These are a mapping from the utf-8 byte starting points provided by
        # the ast nodes, to the unicode character offset tuples for both the
        # start and the end points.
        node_start_table = {}
        call_start_table = {}

        node_type, node_start = None, None
        paren_level, paren_stack = 0, {}
        for unicode_token, utf8_token in izip(token_gen, utf8_token_gen):
            tok_type, tok_name, start, end, _ = unicode_token
            utf8_start = utf8_token[2]
            if tok_type == token.NAME:
                # AST nodes for classes and functions point to the position of
                # their 'def' and 'class' tokens. To get the position of their
                # names, we look for 'def' and 'class' tokens and store the
                # position of the token immediately following them.
                if node_start and node_type == 'definition':
                    node_start_table[node_start[0]] = (start, end)
                    node_type, node_start = None, None
                    continue

                if tok_name in ('def', 'class'):
                    node_type, node_start = 'definition', (utf8_start, start)
                    continue

                # Record all name nodes in the token table. Currently unused,
                # but will be needed for recording variable references.
                node_start_table[utf8_start] = (start, end)
                node_type, node_start = 'name', (utf8_start, start)
            elif tok_type == token.OP:
                # In order to properly capture the start and end of function
                # calls, we need to keep track of the parens. Put the
                # starting positions on a stack (here implemented with a dict
                # so that it can be sparse), but only if the previous node was
                # a name.
                if tok_name == '(':
                    if node_type == 'name':
                        paren_stack[paren_level] = node_start
                    paren_level += 1
                elif tok_name == ')':
                    paren_level -= 1
                    if paren_level in paren_stack:
                        call_start = paren_stack.pop(paren_level)
                        call_start_table[call_start[0]] = (call_start[1], end)
                    node_type, node_start = None, None
            else:
                node_type, node_start = None, None

        return node_start_table, call_start_table

    def get_node_start_end(self, node):
        """Return start and end positions within the file for the given
        AST Node.
        """
        loc = node.lineno, node.col_offset
        if isinstance(node, ast.ClassDef) or isinstance(node, ast.FunctionDef):
            start, end = self.node_start_table.get(loc, (None, None))
        elif isinstance(node, ast.Call):
            start, end = self.call_start_table.get(loc, (None, None))
        else:
            start, end = None, None
        return start, end
def file_needle(needle_type, name, qualname=None):
    """Build a file-level needle as a (needle type, payload dict) pair.

    The qualified name is only recorded when it is truthy.
    """
    payload = dict(name=name)
    if qualname:
        payload.update(qualname=qualname)
    return (needle_type, payload)
def line_needle(needle_type, name, start, end, qualname=None):
    """Build a line-level needle: (needle type, payload dict, Extent).

    *start* and *end* are (row, col) pairs; the payload keeps only the
    column positions, while the Extent records both rows and columns.
    The qualified name is only recorded when it is truthy.
    """
    payload = {
        'name': name,
        'start': start[1],
        'end': end[1]
    }
    if qualname:
        payload['qualname'] = qualname
    extent = Extent(Position(row=start[0], col=start[1]),
                    Position(row=end[0], col=end[1]))
    return (needle_type, payload, extent)
def is_interesting(path):
    """Determine if the file at the given path is interesting enough to
    analyze: a ``.py`` file that is not a symbolic link.
    """
    if not path.endswith('.py'):
        return False
    return not islink(path)
| |
#! /usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2018, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import contextlib
import itertools
import re
import os.path
from operator import attrgetter
from pathlib import Path
from exekall.utils import get_name, add_argument, NoValue, flatten_seq
from exekall.engine import ExprData, Consumer, PrebuiltOperator
from exekall.customization import AdaptorBase
from lisa.target import Target, TargetConf
from lisa.utils import HideExekallID, ArtifactPath, Serializable, get_nested_key, ExekallTaggable
from lisa.conf import MultiSrcConf
from lisa.tests.base import TestBundle, ResultBundleBase, Result
from lisa.regression import compute_regressions
class NonReusable:
    """Marker base class: values of such types must not be reused across
    expressions (see LISAAdaptor.get_non_reusable_type_set).
    """
    pass
class ExekallArtifactPath(ArtifactPath, NonReusable):
    """ArtifactPath flavor created per-expression by exekall; marked
    NonReusable so each expression gets its own storage folder.
    """
    @classmethod
    def from_expr_data(cls, data: ExprData, consumer: Consumer) -> 'ExekallArtifactPath':
        """
        Factory used when running under `exekall`
        """
        artifact_dir = Path(data['expr_artifact_dir']).resolve()
        consumer_name = get_name(consumer)

        # Find a non-used directory
        for i in itertools.count(1):
            artifact_dir_ = Path(artifact_dir, consumer_name, str(i))
            if not artifact_dir_.exists():
                artifact_dir = artifact_dir_
                break

        cls.get_logger().info(f'Creating {consumer_name} artifact storage: {artifact_dir}')
        artifact_dir.mkdir(parents=True)
        # Get canonical absolute paths
        artifact_dir = artifact_dir.resolve()
        root = data['artifact_dir']
        relative = artifact_dir.relative_to(root)
        return cls(root, relative)
class LISAAdaptor(AdaptorBase):
name = 'LISA'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.hidden_op_set = None
def get_non_reusable_type_set(self):
return {NonReusable}
def get_prebuilt_op_set(self):
non_reusable_type_set = self.get_non_reusable_type_set()
op_set = set()
# Try to build as many configurations instances from all the files we
# are given
conf_map = MultiSrcConf.from_yaml_map_list(self.args.conf)
for conf_cls, conf in conf_map.items():
op_set.add(PrebuiltOperator(
conf_cls, [conf],
non_reusable_type_set=non_reusable_type_set
))
# Inject serialized objects as root operators
for path in self.args.inject:
obj = Serializable.from_path(path)
op_set.add(PrebuiltOperator(type(obj), [obj],
non_reusable_type_set=non_reusable_type_set
))
# Inject a dummy empty TargetConf
if self.args.inject_empty_target_conf:
op_set.add(PrebuiltOperator(TargetConf, [TargetConf(conf={})],
non_reusable_type_set=non_reusable_type_set
))
return op_set
def get_hidden_op_set(self, op_set):
hidden_op_set = {
op for op in op_set
if issubclass(op.value_type, HideExekallID)
}
self.hidden_op_set = hidden_op_set
return hidden_op_set
def format_expr_list(self, expr_list, verbose=0):
def get_callable_events(callable_):
"""
Recursively unwraps all layers of wrappers, collecting the events
at each stage. That is needed in order to cope with things like
:class:`exekall.engine.UnboundMethod`.
"""
try:
used_events = callable_.used_events
except AttributeError:
events = set()
else:
events = set(used_events.get_all_events())
with contextlib.suppress(AttributeError):
events.update(get_callable_events(callable_.__wrapped__))
return events
def get_trace_events(expr):
events = get_callable_events(expr.op.callable_)
for param_expr in expr.param_map.values():
events.update(get_trace_events(param_expr))
return events
events = set()
for expr in expr_list:
events.update(get_trace_events(expr))
if events:
joiner = '\n - '
events_str = joiner + joiner.join(sorted(events))
else:
events_str = ' <no events>'
return f'Used trace events:{events_str}'
@staticmethod
def register_run_param(parser):
add_argument(parser, '--conf', action='append',
default=[],
help="LISA configuration file. If multiple configurations of a given type are found, they are merged (last one can override keys in previous ones). Only load trusted files as it can lead to arbitrary code execution.")
add_argument(parser, '--inject', action='append',
metavar='SERIALIZED_OBJECT_PATH',
default=[],
help="Serialized object to inject when building expressions")
# Create an empty TargetConf, so we are able to get the list of tests
# as if we were going to execute them using a target.
# note: that is only used for generating the documentation.
add_argument(parser, '--inject-empty-target-conf', action='store_true',
help=argparse.SUPPRESS)
@staticmethod
def register_compare_param(parser):
add_argument(parser, '--alpha', type=float,
default=5,
help="""Alpha risk for Fisher exact test in percents.""")
add_argument(parser, '--non-significant', action='store_true',
help="""Also show non-significant changes of failure rate.""")
add_argument(parser, '--remove-tag', action='append',
default=[],
help="""Remove the given tags in the testcase IDs before
comparison. Can be repeated.""")
def compare_db_list(self, db_list):
alpha = self.args.alpha / 100
show_non_significant = self.args.non_significant
def get_roots(db):
return {
froz_val
for froz_val in db.get_roots()
# Filter-out NoValue so it does not get counted as a failure,
# since bool(NoValue) is False
if froz_val.value is not NoValue
}
result_list_old, result_list_new = [
get_roots(db)
for db in db_list
]
regr_list = compute_regressions(
result_list_old,
result_list_new,
remove_tags=self.args.remove_tag,
alpha=alpha,
)
if not regr_list:
print('No matching test IDs have been found, use "--remove-tag board" to match across "board" tags')
return
print(f'testcase failure rate changes with alpha={alpha}\n')
id_len = max(len(regr.testcase_id) for regr in regr_list)
header = '{id:<{id_len}} old% new% delta% pvalue fix_iter# {regr_column}'.format(
id='testcase',
id_len=id_len,
regr_column=' significant' if show_non_significant else ''
)
print(header + '\n' + '-' * len(header))
for regr in regr_list:
if regr.significant or show_non_significant:
old_pc, new_pc = regr.failure_pc
# Only show the number of iterations required to validate a fix
# when there was a regression.
if regr.failure_delta_pc > 0:
validation_nr = regr.fix_validation_min_iter_nr
else:
validation_nr = ''
print('{id:<{id_len}} {old_pc:>5.1f}% {new_pc:>5.1f}% {delta_pc:>6.1f}% {pval:>9.2e} {validation_nr:>9} {significant}'.format(
id=regr.testcase_id,
old_pc=old_pc,
new_pc=new_pc,
delta_pc=regr.failure_delta_pc,
pval=regr.p_val,
id_len=id_len,
validation_nr=validation_nr,
significant='*' if regr.significant and show_non_significant else '',
))
@staticmethod
def _parse_uuid_attr(s):
uuid_attr = s.split('.', 1)
try:
uuid, attr = uuid_attr
except ValueError:
uuid = s
attr = None
return uuid, attr
@classmethod
def register_show_param(cls, parser):
uuid_attr_metavar = 'UUID[.ATTRIBUTE]'
add_argument(parser, '--show', action='append', default=[],
type=cls._parse_uuid_attr,
metavar=uuid_attr_metavar,
help="""Show the attribute value with given UUID, or one of its attribute.""")
add_argument(parser, '--show-yaml', action='append', default=[],
type=cls._parse_uuid_attr,
metavar=uuid_attr_metavar,
help="""Show the YAML dump of value with given UUID, or one of its attributes.""")
add_argument(parser, '--serialize', nargs=2, action='append', default=[],
metavar=(uuid_attr_metavar, 'PATH'),
help="""Serialize the value of given UUID to PATH.""")
def show_db(self, db):
parse_uuid_attr = self._parse_uuid_attr
def indent(s):
idt = ' ' * 4
return idt + s.replace('\n', '\n' + idt)
def get_uuid(uuid):
try:
froz_val = db.get_by_uuid(uuid)
except KeyError as e:
raise KeyError(f'UUID={uuid} not found in the database') from e
else:
return froz_val
def get_obj(froz_val):
val = froz_val.value
excep = froz_val.excep
if val is NoValue and excep is not NoValue:
return excep
else:
return val
def get_attr_key(obj, attr_key):
# parse "attr[key1][key2][...]"
attr = attr_key.split('[', 1)[0]
keys = re.findall(r'\[(.*?)\]', attr_key)
if attr:
obj = getattr(obj, attr)
return get_nested_key(obj, keys)
def resolve_attr(obj, attr_key):
if attr_key is None:
return obj
try:
attr_key, remainder = attr_key.split('.', 1)
except ValueError:
return get_attr_key(obj, attr_key)
else:
obj = get_attr_key(obj, attr_key)
return resolve_attr(obj, remainder)
args = self.args
if not (args.show or args.show_yaml):
super().show_db(db)
attr_map = {}
for uuid, attr in args.show:
attr_map.setdefault(uuid, set()).add(attr)
if len(args.show) == 1:
show_format = '{val}'
else:
show_format = 'UUID={uuid} {type}{attr}{eq}{val}'
serialize_spec_list = args.serialize
yaml_show_spec_list = args.show_yaml
for uuid, attr_set in attr_map.items():
attr_list = sorted(attr_set)
froz_val = get_uuid(uuid)
obj = get_obj(froz_val)
for attr in attr_list:
attr_value = resolve_attr(obj, attr)
attr_str = str(attr_value)
if '\n' in attr_str:
attr_str = '\n' + indent(attr_str)
eq = ':'
else:
eq = '='
print(show_format.format(
uuid=froz_val.uuid,
type=get_name(type(obj)),
attr='.' + attr if attr else '',
val=attr_str,
eq=eq,
))
if len(yaml_show_spec_list) == 1:
yaml_show_format = '{yaml}'
def yaml_indent(x):
return x
else:
yaml_show_format = 'UUID={uuid} {type}:\n\n{yaml}'
yaml_indent = indent
for uuid, attr in yaml_show_spec_list:
froz_val = get_uuid(uuid)
obj = get_obj(froz_val)
value = resolve_attr(obj, attr)
if isinstance(value, Serializable):
yaml_str = value.to_yaml()
else:
yaml_str = Serializable._to_yaml(value)
print(yaml_show_format.format(
uuid=uuid,
type=get_name(type(value)),
yaml=yaml_indent(yaml_str),
))
for uuid_attr, path in serialize_spec_list:
uuid, attr = parse_uuid_attr(uuid_attr)
froz_val = get_uuid(uuid)
obj = get_obj(froz_val)
value = resolve_attr(obj, attr)
if isinstance(value, Serializable):
value.to_path(path)
else:
Serializable._to_path(value, path, fmt='yaml')
return 0
@staticmethod
def get_default_type_goal_pattern_set():
return {'*.ResultBundleBase'}
@classmethod
def reload_db(cls, db, path=None):
# If path is not known, we cannot do anything here
if not path:
return db
# This will relocate ArtifactPath instances to the new absolute path of
# the results folder, in case it has been moved to another place
artifact_dir = Path(path).parent.resolve()
# Relocate ArtifactPath embeded in objects so they will always
# contain an absolute path that adapts to the local filesystem
for serial in db.get_all():
val = serial.value
try:
dct = val.__dict__
except AttributeError:
continue
for attr, attr_val in dct.items():
if isinstance(attr_val, ArtifactPath):
new_path = attr_val.with_root(artifact_dir)
# Only update paths to existing files, otherwise assume it
# was pointing outside the artifact_dir and therefore
# should not be fixed up
if os.path.exists(new_path):
setattr(val, attr, new_path)
return db
def finalize_expr(self, expr):
expr_artifact_dir = expr.data['expr_artifact_dir']
artifact_dir = expr.data['artifact_dir']
for expr_val in expr.get_all_vals():
self._finalize_expr_val(expr_val, artifact_dir, expr_artifact_dir)
def _finalize_expr_val(self, expr_val, artifact_dir, expr_artifact_dir):
val = expr_val.value
def needs_rewriting(val):
# Only rewrite ArtifactPath path values
if not isinstance(val, ArtifactPath):
return False
# And only if they are a subfolder of artifact_dir. Otherwise, they
# are something pointing outside of the artifact area, which we
# cannot handle.
return artifact_dir.resolve() in Path(val).resolve().parents
# Add symlinks to artifact folders for ExprValue that were used in the
# ExprValue graph, but were initially computed for another Expression
if needs_rewriting(val):
val = Path(val)
is_subfolder = (expr_artifact_dir.resolve() in val.resolve().parents)
# The folder is reachable from our ExprValue, but is not a
# subfolder of the expr_artifact_dir, so we want to get a
# symlink to it
if not is_subfolder:
# We get the name of the callable
callable_folder = val.parts[-2]
folder = expr_artifact_dir / callable_folder
# We build a relative path back in the hierarchy to the root of
# all artifacts
relative_artifact_dir = Path(os.path.relpath(str(artifact_dir), start=str(folder)))
# The target needs to be a relative symlink, so we replace the
# absolute artifact_dir by a relative version of it
target = relative_artifact_dir / val.relative_to(artifact_dir)
with contextlib.suppress(FileExistsError):
folder.mkdir(parents=True)
for i in itertools.count(1):
symlink = Path(folder, str(i))
if not symlink.exists():
break
symlink.symlink_to(target, target_is_directory=True)
for param_expr_val in expr_val.param_map.values():
self._finalize_expr_val(param_expr_val, artifact_dir, expr_artifact_dir)
@classmethod
def get_tags(cls, value):
    """Return the tags describing *value*, dropping None entries.

    ExekallTaggable objects provide their own tags; anything else falls
    back on the parent class implementation.
    """
    if isinstance(value, ExekallTaggable):
        raw_tags = value.get_tags()
    else:
        raw_tags = super().get_tags(value)
    return {
        name: tag
        for name, tag in raw_tags.items()
        if tag is not None
    }
def get_run_exit_code(self, result_map):
    """Map the computed results to a process exit code.

    :param result_map: mapping of expressions to the list of values
        computed for each of them.
    :returns: 20 if any value recorded an exception, 10 if any
        ResultBundleBase value is FAILED, 0 otherwise.
    """
    # Iterate directly over the value lists: the expression keys were
    # never used, and the previous flatten_seq() call materialized a
    # throwaway intermediate list.
    for expr_val_list in result_map.values():
        for expr_val in expr_val_list:
            # An exception happened
            if expr_val.get_excep():
                return 20
            val = expr_val.value
            if isinstance(val, ResultBundleBase):
                if val.result is Result.FAILED:
                    return 10
    return 0
def format_result(self, expr_val):
    """Render *expr_val* as a human-readable string.

    Computed values are pretty-printed when they are result bundles;
    missing values are described through their recorded exceptions.
    """
    val = expr_val.value
    # A real value was computed: pretty-print result bundles, fall back
    # on str() for anything else.
    if val is not NoValue and val is not None:
        if isinstance(val, ResultBundleBase):
            return val.pretty_format()
        return str(val)
    # No value: report any exceptions that are themselves result
    # bundles, otherwise defer to the parent formatting.
    bundles = [
        excep
        for excep in map(attrgetter('excep'), expr_val.get_excep())
        if isinstance(excep, ResultBundleBase)
    ]
    if bundles:
        return '\n'.join(bundle.pretty_format() for bundle in bundles)
    return super().format_result(expr_val)
| |
# by amounra 0216 : http://www.aumhaa.com
# written against Live 9.6 release on 021516
from __future__ import absolute_import, print_function
import Live
from ableton.v2.base import listens, forward_property, clamp, listenable_property, depends, task, liveobj_valid, liveobj_changed
from ableton.v2.control_surface import Component
from ableton.v2.control_surface.control import ButtonControl, ToggleButtonControl, control_color
from aumhaa.v2.base import initialize_debug
# Module-level debug logger shared by all components in this file.
debug = initialize_debug()
"""A component to store and recall individual values in registers corresponding to the currently selected tracks Channel assignment"""
# Display names of the MIDI input sub-routing channels; the position of a
# name in this list is used to derive the register index for a track.
CHANNELS = ['Ch. 2', 'Ch. 3', 'Ch. 4', 'Ch. 5', 'Ch. 6', 'Ch. 7', 'Ch. 8', 'Ch. 9', 'Ch. 10', 'Ch. 11', 'Ch. 12', 'Ch. 13', 'Ch. 14']
class ChannelizedSettingsBase(Component):
    """Stores/recalls one setting value per channel register.

    A register is kept for each of ``number_channels`` channels and the
    active register follows the MIDI input sub-routing channel of the
    currently selected track.
    """

    @depends(parent_task_group = None)
    def __init__(self, value_dict = None, parent_task_group = None, number_channels = 16, default_value_index = 0, default_channel = 0, channel_list = CHANNELS, *a, **k):
        # value_dict previously defaulted to a shared mutable list
        # ([False, True]); build a fresh list per instance instead.
        self._value_dict = [False, True] if value_dict is None else value_dict
        self._range = len(self._value_dict)
        self._number_channels = number_channels
        self._channel = default_channel
        self._values = [default_value_index for x in range(number_channels)]
        self._channel_list = channel_list
        self._parent_task_group = parent_task_group
        super(ChannelizedSettingsBase, self).__init__(*a, **k)
        self._on_selected_track_changed.subject = self.song.view
        # Deferred refresh task so rapid track changes coalesce into a
        # single update() call.
        self._update_task = parent_task_group.add(task.sequence(task.wait(.01), task.run(self.update)))
        self._update_task.kill()

    def _get_current_channel(self):
        # Map the selected track's input sub-routing to a register index.
        cur_track = self.song.view.selected_track
        cur_chan = cur_track.current_input_sub_routing
        if len(cur_chan) == 0:
            cur_chan = 'All Channels'
        # NOTE(review): when the routing is 'All Channels' the value 1
        # set here is immediately overridden to 14 by the fallback
        # below ('1' is not in the channel name list) — confirm intended.
        if cur_chan == 'All Channels':
            cur_chan = 1
        if cur_chan in self._channel_list:
            cur_chan = (self._channel_list.index(cur_chan)%15)+1
        else:
            cur_chan = 14
        return cur_chan

    @listens('selected_track')
    def _on_selected_track_changed(self):
        self._channel = self._get_current_channel()
        self._update_task.restart()

    @property
    def channel(self):
        return self._channel

    @channel.setter
    def channel(self, channel):
        # Clamp to a *valid register index*: the previous upper bound of
        # self._number_channels allowed an out-of-range index equal to
        # len(self._values), which would raise IndexError on access.
        self._channel = clamp(channel, 0, self._number_channels - 1)
        self.update()

    # self._values is an array of indexes; they must be converted to
    # actual values before reporting.
    @listenable_property
    def value(self):
        return self._value_dict[self.index]

    @value.setter
    def value(self, value):
        if value in self._value_dict:
            self._values[self._channel] = self._value_dict.index(value)
            self.update()

    def set_value(self, value):
        # Silent variant of the setter: stores without notifying.
        if value in self._value_dict:
            self._values[self._channel] = self._value_dict.index(value)

    @listenable_property
    def index(self):
        return self._values[self._channel]

    @index.setter
    def index(self, index):
        if index in range(self._range):
            self._values[self._channel] = index
            self.update()

    def set_index(self, index):
        # Silent variant of the setter: stores without notifying.
        if index in range(self._range):
            self._values[self._channel] = index

    def _update_controls(self):
        # Hook for subclasses to refresh their physical controls.
        pass

    def update(self):
        if self.is_enabled():
            self.notify_index(self.index)
            self.notify_value(self.value)
            self._update_controls()

    def on_enabled_changed(self):
        self._update_task.restart()
class ToggledChannelizedSettingsComponent(ChannelizedSettingsBase):
    # Two-state (off/on) channelized setting driven by one toggle button.
    toggle_button = ToggleButtonControl()

    def __init__(self, toggled_color = 'DefaultButton.On', untoggled_color = 'DefaultButton.Off', *a, **k):
        # Force the boolean value dict; colors are forwarded to the control.
        super(ToggledChannelizedSettingsComponent, self).__init__(value_dict = [False, True], *a, **k)
        self.toggle_button.toggled_color = toggled_color
        self.toggle_button.untoggled_color = untoggled_color

    @toggle_button.toggled
    def toggle_button(self, toggled, button):
        # Store the new button state (0/1) in the current channel register.
        self.index = int(toggled)
        self.update()

    def _update_controls(self):
        # Reflect the stored register value on the button LED.
        self.toggle_button.is_toggled = bool(self._values[self._channel])
class ScrollingChannelizedSettingsComponent(ChannelizedSettingsBase):
    """Channelized setting scrolled with up/down buttons.

    Holding the shift toggle, or using the dedicated bank buttons,
    moves by ``bank_increment`` steps instead of single steps.
    """

    up_button = ButtonControl(repeat=True)
    down_button = ButtonControl(repeat=True)
    bank_up_button = ButtonControl(repeat=True)
    bank_down_button = ButtonControl(repeat=True)
    shift_toggle = ToggleButtonControl()

    def __init__(self, bank_increment = 16, on_color = 'DefaultButton.On', off_color = 'DefaultButton.Off', bank_on_color = 'DefaultButton.On', bank_off_color = 'DefaultButton.Off', *a, **k):
        # NOTE(review): bank_on_color/bank_off_color are accepted for
        # backward compatibility but are currently unused — confirm.
        super(ScrollingChannelizedSettingsComponent, self).__init__(*a, **k)
        self._bank_increment = bank_increment
        self.up_button.color = on_color
        self.up_button.disabled_color = off_color
        self.down_button.color = on_color
        self.down_button.disabled_color = off_color
        self.shift_toggle.toggled_color = on_color
        self.shift_toggle.untoggled_color = off_color

    def _scroll_by(self, delta):
        # Step the current channel's stored index by delta, clamped to
        # the valid range. Shared by all four button handlers, which
        # previously duplicated this logic.
        value = self._values[self._channel]
        self.index = clamp(value + delta, 0, (self._range - 1))

    @up_button.pressed
    def up_button(self, button):
        # Shift turns a single step into a bank-sized step.
        self._scroll_by(self._bank_increment if self.shift_toggle.is_toggled else 1)

    @down_button.pressed
    def down_button(self, button):
        self._scroll_by(-self._bank_increment if self.shift_toggle.is_toggled else -1)

    @bank_up_button.pressed
    def bank_up_button(self, button):
        self._scroll_by(self._bank_increment)

    @bank_down_button.pressed
    def bank_down_button(self, button):
        self._scroll_by(-self._bank_increment)

    def _update_controls(self):
        # Disable the scroll buttons when sitting at either end of the range.
        at_beginning = self.index == 0
        at_end = self.index == (self._range - 1)
        self.up_button.enabled = not at_end
        self.down_button.enabled = not at_beginning
        self.bank_up_button.enabled = not at_end
        self.bank_down_button.enabled = not at_beginning

    def buttons_are_pressed(self):
        # True while any of the scroll buttons is held down.
        return any(control.is_pressed for control in (
            self.up_button, self.down_button,
            self.bank_up_button, self.bank_down_button))
class TaggedSettingsComponent(ScrollingChannelizedSettingsComponent):
    """Scrolling setting persisted as a "@tag:value" token inside the
    name of the first device on the selected track."""

    # Parses the textual tag payload into a stored value (int by
    # default); kept as a lambda attribute so instances can override it.
    _set_attribute_tag_model = lambda self, a: int(a)
    _track_has_tagged_attribute = False
    _last_track = None

    def __init__(self, attribute_tag = None, *a, **k):
        # attribute_tag is effectively required: the old code crashed
        # with an obscure TypeError when it was left as None. Fail with
        # a clear message instead.
        if attribute_tag is None:
            raise ValueError('attribute_tag must be provided')
        self._attribute_tag = '@'+attribute_tag+':'
        super(TaggedSettingsComponent, self).__init__(*a, **k)
        # Deferred tasks: read/write the device-name tag shortly after
        # the triggering event, coalescing bursts.
        self._read_tag_task = self._parent_task_group.add(task.sequence(task.wait(.1), task.run(self._read_attribute_tag)))
        self._read_tag_task.kill()
        self._set_tag_task = self._parent_task_group.add(task.sequence(task.wait(.1), task.run(self._set_attribute_tag)))
        self._set_tag_task.kill()

    @listens('selected_track')
    def _on_selected_track_changed(self):
        super(TaggedSettingsComponent, self)._on_selected_track_changed()
        #debug('setting tagged to False')
        if liveobj_changed(self.song.view.selected_track, self._last_track):
            self._last_track = self.song.view.selected_track
            self._track_has_tagged_attribute = False
            self._read_attribute_tag()

    def _read_attribute_tag(self):
        """Scan the first device's name for the tag and load its value
        into the current channel register."""
        devices = self.song.view.selected_track.devices
        # Conditional expression instead of the old "and/or" idiom,
        # which would wrongly yield None for a falsy first element.
        device = devices[0] if len(devices) else None
        if liveobj_valid(device):
            name = device.name
            for item in name.split(' '):
                if item.startswith(self._attribute_tag):
                    entry = self._set_attribute_tag_model(item[len(self._attribute_tag):])
                    try:
                        self._values[self._channel] = self._value_dict.index(entry)
                    except Exception:
                        # Was a bare except:, which would also swallow
                        # KeyboardInterrupt/SystemExit.
                        debug('cant read attribute error for:', device.name, entry)
                    #debug('setting tagged to True')
                    self._track_has_tagged_attribute = True

    def _set_attribute_tag(self):
        """Rewrite the tag token in the first device's name from the
        current channel register."""
        devices = self.song.view.selected_track.devices
        device = devices[0] if len(devices) else None
        if liveobj_valid(device):
            name = device.name.split(' ')
            for item in name:
                if item.startswith(self._attribute_tag):
                    entry = self._attribute_tag+str(self._value_dict[self._values[self._channel]])
                    try:
                        name[name.index(item)] = str(entry)
                        device.name = ' '.join(name)
                    except Exception:
                        debug('cant set attribute error for:', device.name, entry, ''.join(name))

    def update(self):
        super(TaggedSettingsComponent, self).update()
        if self._track_has_tagged_attribute:
            self._set_tag_task.restart()
| |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
import re
import time
import eventlet
import netaddr
from neutron_lib import constants
from neutron_lib import exceptions
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from pyroute2.netlink.rtnl import ifinfmsg
from pyroute2 import NetlinkError
from pyroute2 import netns
import six
from neutron._i18n import _
from neutron.agent.common import utils
from neutron.common import exceptions as n_exc
from neutron.common import ipv6_utils
from neutron.common import utils as common_utils
from neutron.privileged.agent.linux import ip_lib as privileged
LOG = logging.getLogger(__name__)
# sysctl knob letting processes bind to non-local IPv4 addresses.
IP_NONLOCAL_BIND = 'net.ipv4.ip_nonlocal_bind'
LOOPBACK_DEVNAME = 'lo'
# Device names the kernel creates automatically when the gre module loads.
GRE_TUNNEL_DEVICE_NAMES = ['gre0', 'gretap0']
SYS_NET_PATH = '/sys/class/net'
# Extract the gateway address / route metric from "ip route" output lines.
DEFAULT_GW_PATTERN = re.compile(r"via (\S+)")
METRIC_PATTERN = re.compile(r"metric (\S+)")
# Matches "<ifindex>: <name>: ..." header lines from "ip addr show" output.
DEVICE_NAME_PATTERN = re.compile(r"(\d+?): (\S+?):.*")
def remove_interface_suffix(interface):
    """Remove a possible "<if>@<endpoint>" suffix from an interface' name.

    This suffix can appear in some kernel versions, and intends on specifying,
    for example, a veth's pair. However, this interface name is useless to us
    as further 'ip' commands require that the suffix be removed.
    """
    # partition() returns the whole string as the first element when no
    # '@' is present, so unsuffixed names pass through unchanged.
    name, _sep, _peer = interface.partition("@")
    return name
class AddressNotReady(exceptions.NeutronException):
    # Raised when an IP address is missing from an interface or does not
    # leave the 'tentative' (duplicate address detection) state in time.
    message = _("Failure waiting for address %(address)s to "
                "become ready: %(reason)s")
class InvalidArgument(exceptions.NeutronException):
    # Raised when the kernel rejects a parameter value (e.g. an invalid MTU).
    message = _("Invalid value %(value)s for parameter %(parameter)s "
                "provided.")
class SubProcessBase(object):
    """Base class for helpers that shell out to the 'ip' tool,
    optionally inside a network namespace."""

    def __init__(self, namespace=None,
                 log_fail_as_error=True):
        self.namespace = namespace
        self.log_fail_as_error = log_fail_as_error
        try:
            self.force_root = cfg.CONF.ip_lib_force_root
        except cfg.NoSuchOptError:
            # Only callers that need to force use of the root helper
            # need to register the option.
            self.force_root = False

    def _run(self, options, command, args):
        # Namespaced commands always need root; otherwise honour the
        # force_root configuration (needed to execute in dom0 when
        # running under XenServer/XCP).
        if self.namespace:
            return self._as_root(options, command, args)
        if self.force_root:
            return self._execute(options, command, args, run_as_root=True)
        return self._execute(options, command, args)

    def _as_root(self, options, command, args, use_root_namespace=False):
        target_ns = None if use_root_namespace else self.namespace
        return self._execute(options, command, args, run_as_root=True,
                             namespace=target_ns)

    def _execute(self, options, command, args, run_as_root=False,
                 namespace=None):
        # Assemble: ip [-<opt> ...] <command> <args...>, wrapped with the
        # namespace prefix when one is given.
        opt_list = ['-%s' % o for o in options]
        ip_cmd = add_namespace_to_cmd(['ip'], namespace)
        full_cmd = ip_cmd + opt_list + [command] + list(args)
        return utils.execute(full_cmd, run_as_root=run_as_root,
                             log_fail_as_error=self.log_fail_as_error)

    def set_log_fail_as_error(self, fail_with_error):
        self.log_fail_as_error = fail_with_error

    def get_log_fail_as_error(self):
        return self.log_fail_as_error
class IPWrapper(SubProcessBase):
    """Namespace-aware entry point for interface management.

    Wraps the 'ip' command and the privileged pyroute2 helpers used to
    create, query and delete interfaces and namespaces.
    """

    def __init__(self, namespace=None):
        super(IPWrapper, self).__init__(namespace=namespace)
        self.netns = IpNetnsCommand(self)

    def device(self, name):
        # Handle for a single device living in this wrapper's namespace.
        return IPDevice(name, namespace=self.namespace)

    def get_devices(self, exclude_loopback=True, exclude_gre_devices=True):
        """Return IPDevice objects for the interfaces in the namespace.

        :param exclude_loopback: skip the 'lo' device.
        :param exclude_gre_devices: skip kernel-created gre devices.
        """
        retval = []
        if self.namespace:
            # we call out manually because in order to avoid screen scraping
            # iproute2 we use find to see what is in the sysfs directory, as
            # suggested by Stephen Hemminger (iproute2 dev).
            try:
                cmd = ['ip', 'netns', 'exec', self.namespace,
                       'find', SYS_NET_PATH, '-maxdepth', '1',
                       '-type', 'l', '-printf', '%f ']
                output = utils.execute(
                    cmd,
                    run_as_root=True,
                    log_fail_as_error=self.log_fail_as_error).split()
            except RuntimeError:
                # We could be racing with a cron job deleting namespaces.
                # Just return a empty list if the namespace is deleted.
                with excutils.save_and_reraise_exception() as ctx:
                    if not self.netns.exists(self.namespace):
                        ctx.reraise = False
                        return []
        else:
            # Root namespace: read sysfs directly (symlinks are devices).
            output = (
                i for i in os.listdir(SYS_NET_PATH)
                if os.path.islink(os.path.join(SYS_NET_PATH, i))
            )
        for name in output:
            if (exclude_loopback and name == LOOPBACK_DEVNAME or
                    exclude_gre_devices and name in GRE_TUNNEL_DEVICE_NAMES):
                continue
            retval.append(IPDevice(name, namespace=self.namespace))
        return retval

    def get_device_by_ip(self, ip):
        """Get the IPDevice from system which has ip configured.

        @param ip: look for the device holding this ip. If this is None,
                   None is returned.
        @type ip: str.
        """
        if not ip:
            return None
        addr = IpAddrCommand(self)
        devices = addr.get_devices_with_ip(to=ip)
        # Implicitly returns None when no device holds the address.
        if devices:
            return IPDevice(devices[0]['name'], namespace=self.namespace)

    def add_tuntap(self, name, mode='tap'):
        # Create a tun/tap device via the privileged helper.
        privileged.create_interface(
            name, self.namespace, "tuntap", mode=mode)
        return IPDevice(name, namespace=self.namespace)

    def add_veth(self, name1, name2, namespace2=None):
        # TODO(slaweq): switch to pyroute2 when issue
        # https://github.com/svinota/pyroute2/issues/463
        # will be closed
        args = ['add', name1, 'type', 'veth', 'peer', 'name', name2]
        if namespace2 is None:
            namespace2 = self.namespace
        else:
            self.ensure_namespace(namespace2)
            args += ['netns', namespace2]
        self._as_root([], 'link', tuple(args))
        return (IPDevice(name1, namespace=self.namespace),
                IPDevice(name2, namespace=namespace2))

    def add_macvtap(self, name, src_dev, mode='bridge'):
        privileged.create_interface(name,
                                    self.namespace,
                                    "macvtap",
                                    physical_interface=src_dev,
                                    mode=mode)
        return IPDevice(name, namespace=self.namespace)

    def del_veth(self, name):
        """Delete a virtual interface between two namespaces."""
        privileged.delete_interface(name, self.namespace)

    def add_dummy(self, name):
        """Create a Linux dummy interface with the given name."""
        privileged.create_interface(name, self.namespace, "dummy")
        return IPDevice(name, namespace=self.namespace)

    def ensure_namespace(self, name):
        # Create the namespace if needed and bring loopback up; return a
        # wrapper bound to it either way.
        if not self.netns.exists(name):
            ip = self.netns.add(name)
            lo = ip.device(LOOPBACK_DEVNAME)
            lo.link.set_up()
        else:
            ip = IPWrapper(namespace=name)
        return ip

    def namespace_is_empty(self):
        # Empty means no devices beyond the default exclusions.
        return not self.get_devices()

    def garbage_collect_namespace(self):
        """Conditionally destroy the namespace if it is empty."""
        if self.namespace and self.netns.exists(self.namespace):
            if self.namespace_is_empty():
                self.netns.delete(self.namespace)
                return True
        return False

    def add_device_to_namespace(self, device):
        # Only meaningful when this wrapper is bound to a namespace.
        if self.namespace:
            device.link.set_netns(self.namespace)

    def add_vlan(self, name, physical_interface, vlan_id):
        privileged.create_interface(name,
                                    self.namespace,
                                    "vlan",
                                    physical_interface=physical_interface,
                                    vlan_id=vlan_id)
        return IPDevice(name, namespace=self.namespace)

    def add_vxlan(self, name, vni, group=None, dev=None, ttl=None, tos=None,
                  local=None, srcport=None, dstport=None, proxy=False):
        """Create a vxlan device; optional attributes map to pyroute2 kwargs."""
        kwargs = {'vxlan_id': vni}
        if group:
            kwargs['vxlan_group'] = group
        if dev:
            kwargs['physical_interface'] = dev
        if ttl:
            kwargs['vxlan_ttl'] = ttl
        if tos:
            kwargs['vxlan_tos'] = tos
        if local:
            kwargs['vxlan_local'] = local
        if proxy:
            kwargs['vxlan_proxy'] = proxy
        # tuple: min,max
        if srcport:
            if len(srcport) == 2 and srcport[0] <= srcport[1]:
                kwargs['vxlan_port_range'] = (str(srcport[0]), str(srcport[1]))
            else:
                raise n_exc.NetworkVxlanPortRangeError(vxlan_range=srcport)
        if dstport:
            kwargs['vxlan_port'] = dstport
        privileged.create_interface(name, self.namespace, "vxlan", **kwargs)
        return (IPDevice(name, namespace=self.namespace))
class IPDevice(SubProcessBase):
    """A single network interface, addressed by name and namespace."""

    def __init__(self, name, namespace=None):
        super(IPDevice, self).__init__(namespace=namespace)
        self._name = name
        # Per-subcommand helpers bound to this device.
        self.link = IpLinkCommand(self)
        self.addr = IpAddrCommand(self)
        self.route = IpRouteCommand(self)
        self.neigh = IpNeighCommand(self)

    def __eq__(self, other):
        # Devices are equal when both the (truncated) name and the
        # namespace match.
        return (other is not None and self.name == other.name and
                self.namespace == other.namespace)

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<IPDevice(name=%s, namespace=%s)>" % (self._name,
                                                      self.namespace)

    def exists(self):
        """Return True if the device exists in the namespace."""
        return privileged.interface_exists(self.name, self.namespace)

    def delete_addr_and_conntrack_state(self, cidr):
        """Delete an address along with its conntrack state

        This terminates any active connections through an IP.

        :param cidr: the IP address for which state should be removed.
            This can be passed as a string with or without /NN.
            A netaddr.IPAddress or netaddr.Network representing the IP address
            can also be passed.
        """
        self.addr.delete(cidr)
        ip_str = str(netaddr.IPNetwork(cidr).ip)
        ip_wrapper = IPWrapper(namespace=self.namespace)
        # Delete conntrack state for ingress traffic
        # If 0 flow entries have been deleted
        # conntrack -D will return 1
        try:
            ip_wrapper.netns.execute(["conntrack", "-D", "-d", ip_str],
                                     check_exit_code=True,
                                     extra_ok_codes=[1])
        except RuntimeError:
            LOG.exception("Failed deleting ingress connection state of"
                          " floatingip %s", ip_str)
        # Delete conntrack state for egress traffic
        try:
            ip_wrapper.netns.execute(["conntrack", "-D", "-q", ip_str],
                                     check_exit_code=True,
                                     extra_ok_codes=[1])
        except RuntimeError:
            LOG.exception("Failed deleting egress connection state of"
                          " floatingip %s", ip_str)

    def delete_socket_conntrack_state(self, cidr, dport, protocol):
        # Remove conntrack entries for one destination socket
        # (address + port + protocol); exit code 1 (nothing deleted) is OK.
        ip_str = str(netaddr.IPNetwork(cidr).ip)
        ip_wrapper = IPWrapper(namespace=self.namespace)
        cmd = ["conntrack", "-D", "-d", ip_str, '-p', protocol,
               '--dport', dport]
        try:
            ip_wrapper.netns.execute(cmd, check_exit_code=True,
                                     extra_ok_codes=[1])
        except RuntimeError:
            LOG.exception("Failed deleting ingress connection state of "
                          "socket %(ip)s:%(port)s", {'ip': ip_str,
                                                     'port': dport})

    def disable_ipv6(self):
        # No-op when IPv6 is globally disabled or not bound by default.
        if not ipv6_utils.is_enabled_and_bind_by_default():
            return
        # sysctl interface names use '/' where the device name has '.'.
        sysctl_name = re.sub(r'\.', '/', self.name)
        cmd = ['net.ipv6.conf.%s.disable_ipv6=1' % sysctl_name]
        return sysctl(cmd, namespace=self.namespace)

    @property
    def name(self):
        # Kernel device names are limited in length; expose the
        # truncated form consistently.
        if self._name:
            return self._name[:constants.DEVICE_NAME_MAX_LEN]
        return self._name

    @name.setter
    def name(self, name):
        self._name = name
class IpCommandBase(object):
    """Delegates one 'ip' subcommand (COMMAND) to a parent wrapper.

    The parent is a SubProcessBase-like object providing _run/_as_root.
    """

    COMMAND = ''

    def __init__(self, parent):
        self._parent = parent

    def _run(self, options, args):
        return self._parent._run(options, self.COMMAND, args)

    def _as_root(self, options, args, use_root_namespace=False):
        parent = self._parent
        return parent._as_root(
            options, self.COMMAND, args,
            use_root_namespace=use_root_namespace)
class IPRule(SubProcessBase):
    # Thin wrapper exposing 'ip rule' management through self.rule.
    def __init__(self, namespace=None):
        super(IPRule, self).__init__(namespace=namespace)
        self.rule = IpRuleCommand(self)
class IpRuleCommand(IpCommandBase):
    """Manage policy routing rules ('ip rule') in a namespace."""

    COMMAND = 'rule'

    @staticmethod
    def _make_canonical(ip_version, settings):
        """Converts settings to a canonical representation to compare easily"""
        def canonicalize_fwmark_string(fwmark_mask):
            """Reformats fwmark/mask in to a canonical form

            Examples, these are all equivalent:
                "0x1"
                0x1
                "0x1/0xfffffffff"
                (0x1, 0xfffffffff)

            :param fwmark_mask: The firewall and mask (default 0xffffffff)
            :type fwmark_mask: A string with / as delimiter, an iterable, or a
                single value.
            """
            # Turn the value we were passed in to an iterable: fwmark[, mask]
            if isinstance(fwmark_mask, six.string_types):
                # A / separates the optional mask in a string
                iterable = fwmark_mask.split('/')
            else:
                try:
                    iterable = iter(fwmark_mask)
                except TypeError:
                    # At this point, it must be a single integer
                    iterable = [fwmark_mask]

            def to_i(s):
                if isinstance(s, six.string_types):
                    # Passing 0 as "base" arg to "int" causes it to determine
                    # the base automatically.
                    return int(s, 0)
                # s isn't a string, can't specify base argument
                return int(s)

            integers = [to_i(x) for x in iterable]
            # The default mask is all ones, the mask is 32 bits.
            if len(integers) == 1:
                integers.append(0xffffffff)
            # We now have two integers in a list. Convert to canonical string.
            return '{0:#x}/{1:#x}'.format(*integers)

        def canonicalize(item):
            k, v = item
            # ip rule shows these as 'any'
            if k == 'from' and v == 'all':
                return k, constants.IP_ANY[ip_version]
            # lookup and table are interchangeable. Use table every time.
            if k == 'lookup':
                return 'table', v
            if k == 'fwmark':
                return k, canonicalize_fwmark_string(v)
            return k, v

        # 'unicast' is the implicit default rule type.
        if 'type' not in settings:
            settings['type'] = 'unicast'
        return {k: str(v) for k, v in map(canonicalize, settings.items())}

    def _parse_line(self, ip_version, line):
        # Typical rules from 'ip rule show':
        # 4030201: from 1.2.3.4/24 lookup 10203040
        # 1024: from all iif qg-c43b1928-48 lookup noscope
        parts = line.split()
        if not parts:
            return {}
        # Format of line is: "priority: <key> <value> ... [<type>]"
        settings = {k: v for k, v in zip(parts[1::2], parts[2::2])}
        settings['priority'] = parts[0][:-1]
        if len(parts) % 2 == 0:
            # When line has an even number of columns, last one is the type.
            settings['type'] = parts[-1]
        return self._make_canonical(ip_version, settings)

    def list_rules(self, ip_version):
        # One canonical dict per rule line of 'ip [-4|-6] rule show'.
        lines = self._as_root([ip_version], ['show']).splitlines()
        return [self._parse_line(ip_version, line) for line in lines]

    def _exists(self, ip_version, **kwargs):
        # kwargs must already be canonical (see _make_canonical).
        return kwargs in self.list_rules(ip_version)

    def _make__flat_args_tuple(self, *args, **kwargs):
        # Flatten sorted kwargs into a positional ip-rule argument tuple.
        # NOTE: the double underscore in the name is historical; kept for
        # compatibility.
        for kwargs_item in sorted(kwargs.items(), key=lambda i: i[0]):
            args += kwargs_item
        return tuple(args)

    def add(self, ip, **kwargs):
        """Add a rule for *ip*, unless an equivalent rule already exists."""
        ip_version = common_utils.get_ip_version(ip)
        # In case we need to add a rule based on an incoming
        # interface, pass the "any" IP address, for example, 0.0.0.0/0,
        # else pass the given IP.
        if kwargs.get('iif'):
            kwargs.update({'from': constants.IP_ANY[ip_version]})
        else:
            kwargs.update({'from': ip})
        canonical_kwargs = self._make_canonical(ip_version, kwargs)
        if not self._exists(ip_version, **canonical_kwargs):
            args_tuple = self._make__flat_args_tuple('add', **canonical_kwargs)
            self._as_root([ip_version], args_tuple)

    def delete(self, ip, **kwargs):
        """Delete the rule matching *ip* and *kwargs*."""
        ip_version = common_utils.get_ip_version(ip)
        # In case we need to delete a rule based on an incoming
        # interface, pass the "any" IP address, for example, 0.0.0.0/0,
        # else pass the given IP.
        if kwargs.get('iif'):
            kwargs.update({'from': constants.IP_ANY[ip_version]})
        else:
            kwargs.update({'from': ip})
        canonical_kwargs = self._make_canonical(ip_version, kwargs)
        args_tuple = self._make__flat_args_tuple('del', **canonical_kwargs)
        self._as_root([ip_version], args_tuple)
class IpDeviceCommandBase(IpCommandBase):
    # Base for subcommands bound to a specific device: exposes the
    # parent device's name.
    @property
    def name(self):
        return self._parent.name
class IpLinkCommand(IpDeviceCommandBase):
    """Manage link-level attributes of the parent device via the
    privileged pyroute2 helpers."""

    COMMAND = 'link'

    def set_address(self, mac_address):
        # Set the device's MAC address.
        privileged.set_link_attribute(
            self.name, self._parent.namespace, address=mac_address)

    def set_allmulticast_on(self):
        privileged.set_link_flags(
            self.name, self._parent.namespace, ifinfmsg.IFF_ALLMULTI)

    def set_mtu(self, mtu_size):
        """Set the device MTU.

        :raises InvalidArgument: when the kernel rejects the MTU value.
        """
        try:
            privileged.set_link_attribute(
                self.name, self._parent.namespace, mtu=mtu_size)
        except NetlinkError as e:
            if e.code == errno.EINVAL:
                raise InvalidArgument(parameter="MTU", value=mtu_size)
            raise

    def set_up(self):
        privileged.set_link_attribute(
            self.name, self._parent.namespace, state='up')

    def set_down(self):
        privileged.set_link_attribute(
            self.name, self._parent.namespace, state='down')

    def set_netns(self, namespace):
        # Move the device into *namespace* and keep the parent in sync.
        privileged.set_link_attribute(
            self.name, self._parent.namespace, net_ns_fd=namespace)
        self._parent.namespace = namespace

    def set_name(self, name):
        # Rename the device and keep the parent in sync.
        privileged.set_link_attribute(
            self.name, self._parent.namespace, ifname=name)
        self._parent.name = name

    def set_alias(self, alias_name):
        privileged.set_link_attribute(
            self.name, self._parent.namespace, ifalias=alias_name)

    def delete(self):
        privileged.delete_interface(self.name, self._parent.namespace)

    # The read-only properties below are views over the attribute dict
    # returned by the privileged helper; missing keys yield None.
    @property
    def address(self):
        return self.attributes.get('link/ether')

    @property
    def state(self):
        return self.attributes.get('state')

    @property
    def allmulticast(self):
        return self.attributes.get('allmulticast')

    @property
    def mtu(self):
        return self.attributes.get('mtu')

    @property
    def qdisc(self):
        return self.attributes.get('qdisc')

    @property
    def qlen(self):
        return self.attributes.get('qlen')

    @property
    def alias(self):
        return self.attributes.get('alias')

    @property
    def attributes(self):
        # Fresh snapshot of the link attributes on every access.
        return privileged.get_link_attributes(self.name,
                                              self._parent.namespace)
class IpAddrCommand(IpDeviceCommandBase):
    """Manage IP addresses ('ip addr') on the parent device."""

    COMMAND = 'addr'

    def add(self, cidr, scope='global', add_broadcast=True):
        # Delegates to the module-level privileged helper.
        add_ip_address(cidr, self.name, self._parent.namespace, scope,
                       add_broadcast)

    def delete(self, cidr):
        delete_ip_address(cidr, self.name, self._parent.namespace)

    def flush(self, ip_version):
        flush_ip_addresses(ip_version, self.name, self._parent.namespace)

    def get_devices_with_ip(self, name=None, scope=None, to=None,
                            filters=None, ip_version=None):
        """Get a list of all the devices with an IP attached in the namespace.

        :param name: if it's not None, only a device with that matching name
                     will be returned.
        :param scope: address scope, for example, global, link, or host
        :param to: IP address or cidr to match. If cidr then it will match
                   any IP within the specified subnet
        :param filters: list of any other filters supported by /sbin/ip
        :param ip_version: 4 or 6
        """
        options = [ip_version] if ip_version else []
        args = ['show']
        if name:
            args += [name]
        if filters:
            args += filters
        if scope:
            args += ['scope', scope]
        if to:
            args += ['to', to]
        retval = []
        # Parse 'ip addr show' output: device header lines name the
        # interface; following 'inet'/'inet6' lines carry its addresses.
        for line in self._run(options, tuple(args)).split('\n'):
            line = line.strip()
            match = DEVICE_NAME_PATTERN.search(line)
            if match:
                # Found a match for a device name, but its' addresses will
                # only appear in following lines, so we may as well continue.
                device_name = remove_interface_suffix(match.group(2))
                continue
            elif not line.startswith('inet'):
                continue
            parts = line.split(" ")
            broadcast = None
            if parts[0] == 'inet6':
                scope = parts[3]
            else:
                # IPv4 may carry a broadcast address ('brd') before the scope.
                if parts[2] == 'brd':
                    broadcast = parts[3]
                    scope = parts[5]
                else:
                    scope = parts[3]
            retval.append(dict(name=device_name,
                               cidr=parts[1],
                               scope=scope,
                               broadcast=broadcast,
                               dynamic=('dynamic' == parts[-1]),
                               tentative=('tentative' in line),
                               dadfailed=('dadfailed' == parts[-1])))
        return retval

    def list(self, scope=None, to=None, filters=None, ip_version=None):
        """Get device details of a device named <self.name>."""
        return self.get_devices_with_ip(
            self.name, scope, to, filters, ip_version)

    def wait_until_address_ready(self, address, wait_time=30):
        """Wait until an address is no longer marked 'tentative'

        raises AddressNotReady if times out or address not present on interface
        """
        def is_address_ready():
            try:
                addr_info = self.list(to=address)[0]
            except IndexError:
                raise AddressNotReady(
                    address=address,
                    reason=_('Address not present on interface'))
            if not addr_info['tentative']:
                return True
            if addr_info['dadfailed']:
                raise AddressNotReady(
                    address=address, reason=_('Duplicate address detected'))
            return False
        errmsg = _("Exceeded %s second limit waiting for "
                   "address to leave the tentative state.") % wait_time
        common_utils.wait_until_true(
            is_address_ready, timeout=wait_time, sleep=0.20,
            exception=AddressNotReady(address=address, reason=errmsg))
class IpRouteCommand(IpDeviceCommandBase):
    """Manage routes ('ip route') for the parent device, optionally
    scoped to a specific routing table."""

    COMMAND = 'route'

    def __init__(self, parent, table=None):
        super(IpRouteCommand, self).__init__(parent)
        self._table = table

    def table(self, table):
        """Return an instance of IpRouteCommand which works on given table"""
        return IpRouteCommand(self._parent, table)

    def _table_args(self, override=None):
        # 'table <id>' argument pair, preferring an explicit override.
        if override:
            return ['table', override]
        return ['table', self._table] if self._table else []

    def _dev_args(self):
        return ['dev', self.name] if self.name else []

    def add_gateway(self, gateway, metric=None, table=None):
        # 'replace' makes the operation idempotent.
        ip_version = common_utils.get_ip_version(gateway)
        args = ['replace', 'default', 'via', gateway]
        if metric:
            args += ['metric', metric]
        args += self._dev_args()
        args += self._table_args(table)
        self._as_root([ip_version], tuple(args))

    def _run_as_root_detect_device_not_found(self, options, args):
        # Translate the 'Cannot find device' CLI failure into the
        # proper neutron exception.
        try:
            return self._as_root(options, tuple(args))
        except RuntimeError as rte:
            with excutils.save_and_reraise_exception() as ctx:
                if "Cannot find device" in str(rte):
                    ctx.reraise = False
                    raise exceptions.DeviceNotFoundError(device_name=self.name)

    def delete_gateway(self, gateway, table=None):
        ip_version = common_utils.get_ip_version(gateway)
        args = ['del', 'default',
                'via', gateway]
        args += self._dev_args()
        args += self._table_args(table)
        self._run_as_root_detect_device_not_found([ip_version], args)

    def _parse_routes(self, ip_version, output, **kwargs):
        """Yield one route dict per line of 'ip route list' output."""
        for line in output.splitlines():
            parts = line.split()
            # Format of line is: "<cidr>|default [<key> <value>] ..."
            route = {k: v for k, v in zip(parts[1::2], parts[2::2])}
            route['cidr'] = parts[0]
            # Avoids having to explicitly pass around the IP version
            if route['cidr'] == 'default':
                route['cidr'] = constants.IP_ANY[ip_version]
            # ip route drops things like scope and dev from the output if it
            # was specified as a filter.  This allows us to add them back.
            if self.name:
                route['dev'] = self.name
            if self._table:
                route['table'] = self._table
            # Callers add any filters they use as kwargs
            route.update(kwargs)
            yield route

    def list_routes(self, ip_version, **kwargs):
        args = ['list']
        args += self._dev_args()
        args += self._table_args()
        for k, v in kwargs.items():
            args += [k, v]
        output = self._run([ip_version], tuple(args))
        return [r for r in self._parse_routes(ip_version, output, **kwargs)]

    def list_onlink_routes(self, ip_version):
        # On-link routes are link-scoped routes without a source address.
        routes = self.list_routes(ip_version, scope='link')
        return [r for r in routes if 'src' not in r]

    def add_onlink_route(self, cidr):
        self.add_route(cidr, scope='link')

    def delete_onlink_route(self, cidr):
        self.delete_route(cidr, scope='link')

    def get_gateway(self, scope=None, filters=None, ip_version=None):
        """Return {'gateway': ..., 'metric': ...} for the default route,
        or None when there is no default route."""
        options = [ip_version] if ip_version else []
        args = ['list']
        args += self._dev_args()
        args += self._table_args()
        if filters:
            args += filters
        retval = None
        if scope:
            args += ['scope', scope]
        route_list_lines = self._run(options, tuple(args)).split('\n')
        default_route_line = next((x.strip() for x in
                                   route_list_lines if
                                   x.strip().startswith('default')), None)
        if default_route_line:
            retval = dict()
            gateway = DEFAULT_GW_PATTERN.search(default_route_line)
            if gateway:
                retval.update(gateway=gateway.group(1))
            metric = METRIC_PATTERN.search(default_route_line)
            if metric:
                retval.update(metric=int(metric.group(1)))
        return retval

    def flush(self, ip_version, table=None, **kwargs):
        args = ['flush']
        args += self._table_args(table)
        for k, v in kwargs.items():
            args += [k, v]
        self._as_root([ip_version], tuple(args))

    def add_route(self, cidr, via=None, table=None, **kwargs):
        # 'replace' makes the operation idempotent.
        ip_version = common_utils.get_ip_version(cidr)
        args = ['replace', cidr]
        if via:
            args += ['via', via]
        args += self._dev_args()
        args += self._table_args(table)
        for k, v in kwargs.items():
            args += [k, v]
        self._run_as_root_detect_device_not_found([ip_version], args)

    def delete_route(self, cidr, via=None, table=None, **kwargs):
        ip_version = common_utils.get_ip_version(cidr)
        args = ['del', cidr]
        if via:
            args += ['via', via]
        args += self._dev_args()
        args += self._table_args(table)
        for k, v in kwargs.items():
            args += [k, v]
        self._run_as_root_detect_device_not_found([ip_version], args)
class IPRoute(SubProcessBase):
    """Namespace-wide ``ip route`` wrapper not bound to a device.

    ``self.name`` is None so the route command does not filter by device.
    """
    def __init__(self, namespace=None, table=None):
        super(IPRoute, self).__init__(namespace=namespace)
        self.name = None
        self.route = IpRouteCommand(self, table=table)
class IpNeighCommand(IpDeviceCommandBase):
    """Device-scoped wrapper around the neighbour (ARP/NDP) table."""
    COMMAND = 'neigh'

    def add(self, ip_address, mac_address, **kwargs):
        """Add a neighbour entry for this device."""
        add_neigh_entry(ip_address, mac_address, self.name,
                        self._parent.namespace, **kwargs)

    def delete(self, ip_address, mac_address, **kwargs):
        """Delete a neighbour entry for this device."""
        delete_neigh_entry(ip_address, mac_address, self.name,
                           self._parent.namespace, **kwargs)

    def dump(self, ip_version, **kwargs):
        """Return the neighbour entries for this device."""
        return dump_neigh_entries(ip_version, self.name,
                                  self._parent.namespace, **kwargs)

    def flush(self, ip_version, ip_address):
        """Flush neighbour entries

        Given address entry is removed from neighbour cache (ARP or NDP). To
        flush all entries pass string 'all' as an address.

        :param ip_version: Either 4 or 6 for IPv4 or IPv6 respectively
        :param ip_address: The prefix selecting the neighbours to flush
        """
        # NOTE(haleyb): There is no equivalent to 'flush' in pyroute2
        self._as_root([ip_version], ('flush', 'to', ip_address))
class IpNetnsCommand(IpCommandBase):
    """Wrapper around network namespace creation, deletion and execution."""
    COMMAND = 'netns'

    def add(self, name):
        """Create namespace *name* and return an IPWrapper bound to it."""
        create_network_namespace(name)
        wrapper = IPWrapper(namespace=name)
        wrapper.netns.execute(['sysctl', '-w',
                               'net.ipv4.conf.all.promote_secondaries=1'])
        return wrapper

    def delete(self, name):
        """Delete namespace *name*."""
        delete_network_namespace(name)

    def execute(self, cmds, addl_env=None, check_exit_code=True,
                log_fail_as_error=True, extra_ok_codes=None,
                run_as_root=False):
        """Run *cmds* (optionally inside this namespace) via utils.execute."""
        ns_prefix = []
        if self._parent.namespace:
            # Entering another namespace always requires root.
            run_as_root = True
            ns_prefix = ['ip', 'netns', 'exec', self._parent.namespace]
        env_prefix = []
        if addl_env:
            env_prefix = ['env']
            env_prefix += ['%s=%s' % pair for pair in addl_env.items()]
        full_cmd = ns_prefix + env_prefix + list(cmds)
        return utils.execute(full_cmd, check_exit_code=check_exit_code,
                             extra_ok_codes=extra_ok_codes,
                             log_fail_as_error=log_fail_as_error,
                             run_as_root=run_as_root)

    def exists(self, name):
        """Return True if namespace *name* exists."""
        return network_namespace_exists(name)
def vlan_in_use(segmentation_id, namespace=None):
    """Return True if VLAN ID is in use by an interface, else False."""
    link_output = IPWrapper(namespace=namespace).netns.execute(
        ["ip", "-d", "link", "list"], check_exit_code=True)
    return '802.1Q id %s ' % segmentation_id in link_output
def vxlan_in_use(segmentation_id, namespace=None):
    """Return True if VXLAN VNID is in use by an interface, else False."""
    link_output = IPWrapper(namespace=namespace).netns.execute(
        ["ip", "-d", "link", "list"], check_exit_code=True)
    return 'vxlan id %s ' % segmentation_id in link_output
def device_exists(device_name, namespace=None):
    """Return True if the device exists in the namespace."""
    dev = IPDevice(device_name, namespace=namespace)
    return dev.exists()
def device_exists_with_ips_and_mac(device_name, ip_cidrs, mac, namespace=None):
    """Return True if the device with the given IP addresses and MAC address
    exists in the namespace.
    """
    try:
        device = IPDevice(device_name, namespace=namespace)
        if mac and mac != device.link.address:
            return False
        configured = {ip['cidr'] for ip in device.addr.list()}
        return all(cidr in configured for cidr in ip_cidrs)
    except RuntimeError:
        # Raised when the device does not exist.
        return False
def get_device_mac(device_name, namespace=None):
    """Return the MAC address of the device."""
    dev = IPDevice(device_name, namespace=namespace)
    return dev.link.address
def get_device_mtu(device_name, namespace=None):
    """Return the MTU value of the device."""
    dev = IPDevice(device_name, namespace=namespace)
    return dev.link.mtu
# Re-export the privileged exceptions so callers of this module do not
# need to import the privileged module directly.
NetworkNamespaceNotFound = privileged.NetworkNamespaceNotFound
NetworkInterfaceNotFound = privileged.NetworkInterfaceNotFound
def add_ip_address(cidr, device, namespace=None, scope='global',
                   add_broadcast=True):
    """Add an IP address.

    :param cidr: IP address to add, in CIDR notation
    :param device: Device name to use in adding address
    :param namespace: The name of the namespace in which to add the address
    :param scope: scope of address being added
    :param add_broadcast: should broadcast address be added
    """
    network = netaddr.IPNetwork(cidr)
    if add_broadcast and network.version == 4:
        # NOTE(slaweq): for a /32 cidr net.broadcast is None, so fall back
        # to the address itself as the broadcast address.
        broadcast = str(network.broadcast or network.ip)
    else:
        broadcast = None
    privileged.add_ip_address(
        network.version, str(network.ip), network.prefixlen,
        device, namespace, scope, broadcast)
def delete_ip_address(cidr, device, namespace=None):
    """Delete an IP address.

    :param cidr: IP address to delete, in CIDR notation
    :param device: Device name to use in deleting address
    :param namespace: The name of the namespace in which to delete the address
    """
    network = netaddr.IPNetwork(cidr)
    privileged.delete_ip_address(
        network.version, str(network.ip), network.prefixlen,
        device, namespace)
def flush_ip_addresses(ip_version, device, namespace=None):
    """Flush all IP addresses of the given version from a device.

    :param ip_version: IP version of addresses to flush
    :param device: Device name to use in flushing addresses
    :param namespace: The name of the namespace in which to flush the addresses
    """
    privileged.flush_ip_addresses(ip_version, device, namespace)
def get_routing_table(ip_version, namespace=None):
    """Return a list of dictionaries, each representing a route.

    :param ip_version: the routes of version to return, for example 4
    :param namespace: the namespace to read routes from
    :return: a list of route dicts with keys 'destination', 'nexthop',
        'device' and 'scope'
    """
    routes = privileged.get_routing_table(ip_version, namespace)
    # oslo.privsep turns lists to tuples in its IPC code. Change it back
    return list(routes)
# NOTE(haleyb): These neighbour functions live outside the IpNeighCommand
# class since not all callers require it.
def add_neigh_entry(ip_address, mac_address, device, namespace=None, **kwargs):
    """Add a neighbour entry.

    :param ip_address: IP address of entry to add
    :param mac_address: MAC address of entry to add
    :param device: Device name to use in adding entry
    :param namespace: The name of the namespace in which to add the entry
    :param kwargs: Callers add any filters they use as kwargs
    """
    ip_version = common_utils.get_ip_version(ip_address)
    privileged.add_neigh_entry(ip_version, ip_address, mac_address,
                               device, namespace, **kwargs)
def delete_neigh_entry(ip_address, mac_address, device, namespace=None,
                       **kwargs):
    """Delete a neighbour entry.

    :param ip_address: IP address of entry to delete
    :param mac_address: MAC address of entry to delete
    :param device: Device name to use in deleting entry
    :param namespace: The name of the namespace in which to delete the entry
    :param kwargs: Callers add any filters they use as kwargs
    """
    ip_version = common_utils.get_ip_version(ip_address)
    privileged.delete_neigh_entry(ip_version, ip_address, mac_address,
                                  device, namespace, **kwargs)
def dump_neigh_entries(ip_version, device=None, namespace=None, **kwargs):
    """Dump all neighbour entries.

    :param ip_version: IP version of entries to show (4 or 6)
    :param device: Device name to use in dumping entries
    :param namespace: The name of the namespace in which to dump the entries
    :param kwargs: Callers add any filters they use as kwargs
    :return: a list of dictionaries, each representing a neighbour.
        The dictionary format is: {'dst': ip_address,
                                   'lladdr': mac_address,
                                   'device': device_name}
    """
    entries = privileged.dump_neigh_entries(ip_version, device, namespace,
                                            **kwargs)
    return list(entries)
def create_network_namespace(namespace, **kwargs):
    """Create a network namespace.

    :param namespace: The name of the namespace to create
    :param kwargs: Callers add any filters they use as kwargs
    """
    privileged.create_netns(namespace, **kwargs)
def delete_network_namespace(namespace, **kwargs):
    """Delete a network namespace.

    :param namespace: The name of the namespace to delete
    :param kwargs: Callers add any filters they use as kwargs
    """
    privileged.remove_netns(namespace, **kwargs)
def list_network_namespaces(**kwargs):
    """List all network namespace entries.

    :param kwargs: Callers add any filters they use as kwargs
    """
    if cfg.CONF.AGENT.use_helper_for_ns_read:
        return privileged.list_netns(**kwargs)
    return netns.listnetns(**kwargs)
def network_namespace_exists(namespace, **kwargs):
    """Check if a network namespace exists.

    :param namespace: The name of the namespace to check
    :param kwargs: Callers add any filters they use as kwargs
    """
    return namespace in list_network_namespaces(**kwargs)
def ensure_device_is_ready(device_name, namespace=None):
    """Set the device up, returning False if it is missing or has no MAC."""
    dev = IPDevice(device_name, namespace=namespace)
    try:
        # A device with no MAC address is unusable; if the device does not
        # exist at all, a RuntimeError is raised.
        if not dev.link.address:
            LOG.error("Device %s cannot be used as it has no MAC "
                      "address", device_name)
            return False
        dev.link.set_up()
        return True
    except RuntimeError:
        return False
def iproute_arg_supported(command, arg):
    """Return True if *arg* appears in the help output of *command*.

    :param command: iproute command as a list, e.g. ['ip', 'route']
    :param arg: argument name to look for in the help text
    """
    # Build a new list rather than extending in place: the original
    # ``command += ['help']`` mutated the caller's list argument.
    help_command = command + ['help']
    stdout, stderr = utils.execute(help_command, check_exit_code=False,
                                   return_stderr=True,
                                   log_fail_as_error=False)
    # iproute2 prints usage/help text on stderr.
    return any(arg in line for line in stderr.split('\n'))
def _arping(ns_name, iface_name, address, count, log_exception):
    """Send *count* gratuitous ARP updates for *address* from *iface_name*.

    Each iteration sends one gratuitous REQUEST (-U) and one gratuitous
    REPLY (-A) and returns early if the interface or address disappears.

    :param ns_name: namespace to run arping in
    :param iface_name: interface to send from
    :param address: IP address to advertise
    :param count: number of update iterations
    :param log_exception: if True, log the first/still-existing failures
        at exception level instead of info level
    """
    # Due to a Linux kernel bug*, it's advised to spread gratuitous updates
    # more, injecting an interval between consequent packets that is longer
    # than 1s which is currently hardcoded** in arping. To achieve that, we
    # call arping tool the 'count' number of times, each issuing a single ARP
    # update, and wait between iterations.
    #
    # * https://patchwork.ozlabs.org/patch/760372/
    # ** https://github.com/iputils/iputils/pull/86
    first = True
    # Since arping is used to send gratuitous ARP, a response is
    # not expected. In some cases (no response) and with some
    # platforms (>=Ubuntu 14.04), arping exit code can be 1.
    extra_ok_codes = [1]
    ip_wrapper = IPWrapper(namespace=ns_name)
    for i in range(count):
        if not first:
            # hopefully enough for kernel to get out of locktime loop
            time.sleep(2)
            # On the second (and subsequent) arping calls, we can get a
            # "bind: Cannot assign requested address" error since
            # the IP address might have been deleted concurrently.
            # We will log an error below if this isn't the case, so
            # no need to have execute() log one as well.
            extra_ok_codes = [1, 2]
        first = False
        # some Linux kernels* don't honour REPLYs. Send both gratuitous REQUEST
        # and REPLY packets (REQUESTs are left for backwards compatibility for
        # in case if some network peers, vice versa, honor REPLYs and not
        # REQUESTs)
        #
        # * https://patchwork.ozlabs.org/patch/763016/
        for arg in ('-U', '-A'):
            arping_cmd = ['arping', arg, '-I', iface_name, '-c', 1,
                          # Pass -w to set timeout to ensure exit if interface
                          # removed while running
                          '-w', 1.5, address]
            try:
                ip_wrapper.netns.execute(arping_cmd,
                                         extra_ok_codes=extra_ok_codes)
            except Exception as exc:
                # Since this is spawned in a thread and executed 2 seconds
                # apart, something may have been deleted while we were
                # sleeping. Downgrade message to info and return early
                # unless it was the first try.
                exists = device_exists_with_ips_and_mac(iface_name,
                                                        [address],
                                                        mac=None,
                                                        namespace=ns_name)
                msg = _("Failed sending gratuitous ARP to %(addr)s on "
                        "%(iface)s in namespace %(ns)s: %(err)s")
                logger_method = LOG.exception
                if not (log_exception and (first or exists)):
                    logger_method = LOG.info
                logger_method(msg, {'addr': address,
                                    'iface': iface_name,
                                    'ns': ns_name,
                                    'err': exc})
                if not exists:
                    LOG.info("Interface %(iface)s or address %(addr)s "
                             "in namespace %(ns)s was deleted concurrently",
                             {'iface': iface_name,
                              'addr': address,
                              'ns': ns_name})
                    return
def send_ip_addr_adv_notif(
        ns_name, iface_name, address, count=3, log_exception=True):
    """Send advance notification of an IP address assignment.

    If the address is in the IPv4 family, send gratuitous ARP.

    If the address is in the IPv6 family, no advance notification is
    necessary, since the Neighbor Discovery Protocol (NDP), Duplicate
    Address Discovery (DAD), and (for stateless addresses) router
    advertisements (RAs) are sufficient for address resolution and
    duplicate address detection.

    :param ns_name: Namespace name which GARPs are gonna be sent from.
    :param iface_name: Name of interface which GARPs are gonna be sent from.
    :param address: Advertised IP address.
    :param count: (Optional) How many GARPs are gonna be sent. Default is 3.
    :param log_exception: (Optional) True if possible failures should be logged
                          on exception level. Otherwise they are logged on
                          WARNING level. Default is True.
    """
    if count > 0 and netaddr.IPAddress(address).version == 4:
        # Run asynchronously; _arping sleeps between iterations.
        eventlet.spawn_n(_arping, ns_name, iface_name, address, count,
                         log_exception)
def sysctl(cmd, namespace=None, log_fail_as_error=True):
    """Run sysctl command 'cmd'

    @param cmd: a list containing the sysctl command to run
    @param namespace: network namespace to run command in
    @param log_fail_as_error: failure logged as LOG.error

    execute() doesn't return the exit status of the command it runs,
    it returns stdout and stderr. Setting check_exit_code=True will cause
    it to raise a RuntimeError if the exit status of the command is
    non-zero, which in sysctl's case is an error. So we're normalizing
    that into zero (success) and one (failure) here to mimic what
    "echo $?" in a shell would be.

    This is all because sysctl is too verbose and prints the value you
    just set on success, unlike most other utilities that print nothing.

    execute() will have dumped a message to the logs with the actual
    output on failure, so it's not lost, and we don't need to print it
    here.
    """
    full_cmd = ['sysctl', '-w'] + cmd
    ip_wrapper = IPWrapper(namespace=namespace)
    try:
        ip_wrapper.netns.execute(full_cmd, run_as_root=True,
                                 log_fail_as_error=log_fail_as_error)
        return 0
    except RuntimeError as rte:
        LOG.warning(
            "Setting %(cmd)s in namespace %(ns)s failed: %(err)s.",
            {'cmd': full_cmd,
             'ns': namespace,
             'err': rte})
        return 1
def add_namespace_to_cmd(cmd, namespace=None):
    """Add an optional namespace to the command."""
    if namespace:
        return ['ip', 'netns', 'exec', namespace] + cmd
    return cmd
def get_ipv6_lladdr(mac_addr):
    """Return the IPv6 link-local address (with /64) derived from a MAC."""
    lladdr = netaddr.EUI(mac_addr).ipv6_link_local()
    return '%s/64' % lladdr
def get_ip_nonlocal_bind(namespace=None):
    """Get kernel option value of ip_nonlocal_bind in given namespace."""
    wrapper = IPWrapper(namespace)
    output = wrapper.netns.execute(['sysctl', '-bn', IP_NONLOCAL_BIND],
                                   run_as_root=True)
    return int(output)
def set_ip_nonlocal_bind(value, namespace=None, log_fail_as_error=True):
    """Set sysctl knob of ip_nonlocal_bind to given value."""
    knob = '%s=%d' % (IP_NONLOCAL_BIND, value)
    return sysctl([knob], namespace=namespace,
                  log_fail_as_error=log_fail_as_error)
def set_ip_nonlocal_bind_for_namespace(namespace):
    """Set ip_nonlocal_bind but don't raise exception on failure."""
    if set_ip_nonlocal_bind(value=0, namespace=namespace,
                            log_fail_as_error=False):
        LOG.warning(
            "%s will not be set to 0 in the root namespace in order to "
            "not break DVR, which requires this value be set to 1. This "
            "may introduce a race between moving a floating IP to a "
            "different network node, and the peer side getting a "
            "populated ARP cache for a given floating IP address.",
            IP_NONLOCAL_BIND)
def get_ipv6_forwarding(device, namespace=None):
    """Get kernel value of IPv6 forwarding for device in given namespace."""
    knob = "net.ipv6.conf.%s.forwarding" % device
    wrapper = IPWrapper(namespace)
    return int(wrapper.netns.execute(['sysctl', '-b', knob],
                                     run_as_root=True))
| |
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains routines for printing protocol messages in JSON format.
Simple usage example:
# Create a proto object and serialize it to a json format string.
message = my_proto_pb2.MyMessage(foo='bar')
json_string = json_format.MessageToJson(message)
# Parse a json format string to proto object.
message = json_format.Parse(json_string, my_proto_pb2.MyMessage())
"""
__author__ = 'jieluo@google.com (Jie Luo)'
import base64
import json
import math
import six
import sys
from typy.google.protobuf import descriptor
from typy.google.protobuf import symbol_database
# strftime format used for timestamps (seconds precision). NOTE: the name
# carries a historical typo; kept as-is for compatibility.
_TIMESTAMPFOMAT = '%Y-%m-%dT%H:%M:%S'
# All integer C++ field types, used to route scalar conversion.
_INT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_INT32,
                        descriptor.FieldDescriptor.CPPTYPE_UINT32,
                        descriptor.FieldDescriptor.CPPTYPE_INT64,
                        descriptor.FieldDescriptor.CPPTYPE_UINT64])
# 64-bit integers are serialized as JSON strings (see _FieldToJsonObject)
# because JSON numbers cannot represent the full 64-bit range.
_INT64_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_INT64,
                          descriptor.FieldDescriptor.CPPTYPE_UINT64])
_FLOAT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_FLOAT,
                          descriptor.FieldDescriptor.CPPTYPE_DOUBLE])
# JSON spellings for non-finite float values.
_INFINITY = 'Infinity'
_NEG_INFINITY = '-Infinity'
_NAN = 'NaN'
class Error(Exception):
  """Top-level module error for json_format; base of all errors here."""
class SerializeToJsonError(Error):
  """Thrown if serialization to JSON fails (e.g. unmappable enum value)."""
class ParseError(Error):
  """Thrown in case of parsing error while converting JSON to a message."""
def MessageToJson(message, including_default_value_fields=False):
  """Converts protobuf message to JSON format.

  Args:
    message: The protocol buffers message instance to serialize.
    including_default_value_fields: If True, singular primitive fields,
        repeated fields, and map fields will always be serialized. If
        False, only serialize non-empty fields. Singular message fields
        and oneof fields are not affected by this option.

  Returns:
    A string containing the JSON formatted protocol buffer message.
  """
  json_object = _MessageToJsonObject(message, including_default_value_fields)
  return json.dumps(json_object, indent=2)
def _MessageToJsonObject(message, including_default_value_fields):
  """Converts message to an object according to Proto3 JSON Specification."""
  message_descriptor = message.DESCRIPTOR
  # Wrapper types serialize as their bare 'value' field.
  if _IsWrapperMessage(message_descriptor):
    return _WrapperMessageToJsonObject(message)
  # Well-known types have dedicated (serializer, parser) method pairs.
  methods = _WKTJSONMETHODS.get(message_descriptor.full_name)
  if methods is not None:
    return methods[0](message, including_default_value_fields)
  return _RegularMessageToJsonObject(
      message, {}, including_default_value_fields)
def _IsMapEntry(field):
  """True if *field* is the synthetic entry message of a map field."""
  if field.type != descriptor.FieldDescriptor.TYPE_MESSAGE:
    return False
  return (field.message_type.has_options and
          field.message_type.GetOptions().map_entry)
def _RegularMessageToJsonObject(message, js, including_default_value_fields):
  """Converts normal message according to Proto3 JSON Specification.

  Args:
    message: The message to serialize.
    js: Dict to populate (may already hold e.g. an Any '@type' entry).
    including_default_value_fields: Also emit unset singular primitives,
        empty repeated fields and empty maps.

  Returns:
    The populated *js* dict.
  """
  fields = message.ListFields()
  include_default = including_default_value_fields
  try:
    # First pass: every field that is actually set.
    for field, value in fields:
      name = field.camelcase_name
      if _IsMapEntry(field):
        # Convert a map field.
        v_field = field.message_type.fields_by_name['value']
        js_map = {}
        for key in value:
          if isinstance(key, bool):
            # JSON object keys are strings; bools spell as 'true'/'false'.
            if key:
              recorded_key = 'true'
            else:
              recorded_key = 'false'
          else:
            recorded_key = key
          js_map[recorded_key] = _FieldToJsonObject(
              v_field, value[key], including_default_value_fields)
        js[name] = js_map
      elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
        # Convert a repeated field.
        js[name] = [_FieldToJsonObject(field, k, include_default)
                    for k in value]
      else:
        js[name] = _FieldToJsonObject(field, value, include_default)
    # Serialize default value if including_default_value_fields is True.
    # Second pass: fill in defaults for fields not serialized above.
    if including_default_value_fields:
      message_descriptor = message.DESCRIPTOR
      for field in message_descriptor.fields:
        # Singular message fields and oneof fields will not be affected.
        if ((field.label != descriptor.FieldDescriptor.LABEL_REPEATED and
             field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE) or
            field.containing_oneof):
          continue
        name = field.camelcase_name
        if name in js:
          # Skip the field which has been serialized already.
          continue
        if _IsMapEntry(field):
          js[name] = {}
        elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
          js[name] = []
        else:
          js[name] = _FieldToJsonObject(field, field.default_value)
  except ValueError as e:
    # 'field' still names the field that failed when the error is raised.
    raise SerializeToJsonError(
        'Failed to serialize {0} field: {1}.'.format(field.name, e))
  return js
def _FieldToJsonObject(
    field, value, including_default_value_fields=False):
  """Converts field value according to Proto3 JSON Specification.

  Args:
    field: Descriptor of the field being converted.
    value: The Python value held by the field.
    including_default_value_fields: Passed through for message values.

  Returns:
    The JSON-compatible representation of *value*.

  Raises:
    SerializeToJsonError: If an enum field holds an unknown numeric value.
  """
  cpp_type = field.cpp_type
  if cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
    return _MessageToJsonObject(value, including_default_value_fields)
  elif cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
    enum_value = field.enum_type.values_by_number.get(value, None)
    if enum_value is not None:
      return enum_value.name
    else:
      # Fixed grammar of the error message (was "can not mapped").
      raise SerializeToJsonError('Enum field contains an integer value '
                                 'which can not be mapped to an enum value.')
  elif cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
    if field.type == descriptor.FieldDescriptor.TYPE_BYTES:
      # Use base64 Data encoding for bytes
      return base64.b64encode(value).decode('utf-8')
    else:
      return value
  elif cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
    return bool(value)
  elif cpp_type in _INT64_TYPES:
    # 64-bit ints become JSON strings to avoid double-precision loss.
    return str(value)
  elif cpp_type in _FLOAT_TYPES:
    # Non-finite floats have dedicated JSON string spellings.
    if math.isinf(value):
      if value < 0.0:
        return _NEG_INFINITY
      else:
        return _INFINITY
    if math.isnan(value):
      return _NAN
    return value
def _AnyMessageToJsonObject(message, including_default):
  """Converts Any message according to Proto3 JSON Specification."""
  if not message.ListFields():
    # A fully-unset Any becomes an empty JSON object.
    return {}
  type_url = message.type_url
  js = {'@type': type_url}
  sub_message = _CreateMessageFromTypeUrl(type_url)
  sub_message.ParseFromString(message.value)
  sub_descriptor = sub_message.DESCRIPTOR
  if _IsWrapperMessage(sub_descriptor):
    js['value'] = _WrapperMessageToJsonObject(sub_message)
    return js
  full_name = sub_descriptor.full_name
  if full_name in _WKTJSONMETHODS:
    js['value'] = _WKTJSONMETHODS[full_name][0](sub_message,
                                                including_default)
    return js
  return _RegularMessageToJsonObject(sub_message, js, including_default)
def _CreateMessageFromTypeUrl(type_url):
  """Instantiate an empty message of the type named in *type_url*."""
  # TODO(jieluo): Should add a way that users can register the type resolver
  # instead of the default one.
  database = symbol_database.Default()
  # The type name is everything after the last '/'.
  type_name = type_url.split('/')[-1]
  try:
    message_descriptor = database.pool.FindMessageTypeByName(type_name)
  except KeyError:
    raise TypeError(
        'Can not find message descriptor by type_url: {0}.'.format(type_url))
  return database.GetPrototype(message_descriptor)()
def _GenericMessageToJsonObject(message, unused_including_default):
  """Converts message by ToJsonString according to Proto3 JSON Specification."""
  # Duration, Timestamp and FieldMask have ToJsonString method to do the
  # convert. Users can also call the method directly.
  return message.ToJsonString()
def _ValueMessageToJsonObject(message, unused_including_default=False):
  """Converts Value message according to Proto3 JSON Specification."""
  kind = message.WhichOneof('kind')
  # An unset Value serializes as JSON null; parsing it back yields an
  # explicit null_value, which may differ from the original message.
  if kind is None or kind == 'null_value':
    return None
  if kind == 'list_value':
    return _ListValueMessageToJsonObject(message.list_value)
  if kind == 'struct_value':
    value = message.struct_value
  else:
    value = getattr(message, kind)
  oneof_descriptor = message.DESCRIPTOR.fields_by_name[kind]
  return _FieldToJsonObject(oneof_descriptor, value)
def _ListValueMessageToJsonObject(message, unused_including_default=False):
  """Converts ListValue message according to Proto3 JSON Specification."""
  return [_ValueMessageToJsonObject(element) for element in message.values]
def _StructMessageToJsonObject(message, unused_including_default=False):
  """Converts Struct message according to Proto3 JSON Specification."""
  fields = message.fields
  return {key: _ValueMessageToJsonObject(fields[key]) for key in fields}
def _IsWrapperMessage(message_descriptor):
  """True if the descriptor comes from google/protobuf/wrappers.proto."""
  return message_descriptor.file.name == 'google/protobuf/wrappers.proto'
def _WrapperMessageToJsonObject(message):
  """Serialize a wrapper message as just its single 'value' field."""
  value_field = message.DESCRIPTOR.fields_by_name['value']
  return _FieldToJsonObject(value_field, message.value)
def _DuplicateChecker(js):
result = {}
for name, value in js:
if name in result:
raise ParseError('Failed to load JSON: duplicate key {0}.'.format(name))
result[name] = value
return result
def Parse(text, message):
  """Parses a JSON representation of a protocol message into a message.

  Args:
    text: Message JSON representation.
    message: A protocol buffer message to merge into.

  Returns:
    The same message passed as argument.

  Raises:
    ParseError: On JSON parsing problems.
  """
  if not isinstance(text, six.text_type): text = text.decode('utf-8')
  try:
    if sys.version_info < (2, 7):
      # object_pair_hook is not supported before python2.7
      js = json.loads(text)
    else:
      # Duplicate keys are rejected via the object_pairs_hook.
      js = json.loads(text, object_pairs_hook=_DuplicateChecker)
  except ValueError as e:
    raise ParseError('Failed to load JSON: {0}.'.format(str(e)))
  _ConvertMessage(js, message)
  return message
def _ConvertFieldValuePair(js, message):
  """Convert field value pairs into regular message.

  Args:
    js: A JSON object to convert the field value pairs.
    message: A regular protocol message to record the data.

  Raises:
    ParseError: In case of problems converting.
  """
  # Tracks both field names and oneof names already seen, to reject
  # duplicate fields and multiple members of the same oneof.
  names = []
  message_descriptor = message.DESCRIPTOR
  for name in js:
    try:
      field = message_descriptor.fields_by_camelcase_name.get(name, None)
      if not field:
        raise ParseError(
            'Message type "{0}" has no field named "{1}".'.format(
                message_descriptor.full_name, name))
      if name in names:
        raise ParseError(
            'Message type "{0}" should not have multiple "{1}" fields.'.format(
                message.DESCRIPTOR.full_name, name))
      names.append(name)
      # Check no other oneof field is parsed.
      if field.containing_oneof is not None:
        oneof_name = field.containing_oneof.name
        if oneof_name in names:
          raise ParseError('Message type "{0}" should not have multiple "{1}" '
                           'oneof fields.'.format(
                               message.DESCRIPTOR.full_name, oneof_name))
        names.append(oneof_name)
      value = js[name]
      if value is None:
        # JSON null clears the field.
        message.ClearField(field.name)
        continue
      # Parse field value.
      if _IsMapEntry(field):
        message.ClearField(field.name)
        _ConvertMapFieldValue(value, message, field)
      elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
        message.ClearField(field.name)
        if not isinstance(value, list):
          raise ParseError('repeated field {0} must be in [] which is '
                           '{1}.'.format(name, value))
        if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
          # Repeated message field.
          for item in value:
            sub_message = getattr(message, field.name).add()
            # None is a null_value in Value.
            if (item is None and
                sub_message.DESCRIPTOR.full_name != 'google.protobuf.Value'):
              raise ParseError('null is not allowed to be used as an element'
                               ' in a repeated field.')
            _ConvertMessage(item, sub_message)
        else:
          # Repeated scalar field.
          for item in value:
            if item is None:
              raise ParseError('null is not allowed to be used as an element'
                               ' in a repeated field.')
            getattr(message, field.name).append(
                _ConvertScalarFieldValue(item, field))
      elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
        sub_message = getattr(message, field.name)
        _ConvertMessage(value, sub_message)
      else:
        setattr(message, field.name, _ConvertScalarFieldValue(value, field))
    except ParseError as e:
      # Oneof-member errors already carry full context; others get the
      # field name prepended.
      if field and field.containing_oneof is None:
        raise ParseError('Failed to parse {0} field: {1}'.format(name, e))
      else:
        raise ParseError(str(e))
    except ValueError as e:
      raise ParseError('Failed to parse {0} field: {1}.'.format(name, e))
    except TypeError as e:
      raise ParseError('Failed to parse {0} field: {1}.'.format(name, e))
def _ConvertMessage(value, message):
  """Convert a JSON object into a message.

  Args:
    value: A JSON object.
    message: A WKT or regular protocol message to record the data.

  Raises:
    ParseError: In case of convert problems.
  """
  message_descriptor = message.DESCRIPTOR
  if _IsWrapperMessage(message_descriptor):
    _ConvertWrapperMessage(value, message)
    return
  # Well-known types have dedicated (serializer, parser) method pairs.
  methods = _WKTJSONMETHODS.get(message_descriptor.full_name)
  if methods is not None:
    methods[1](value, message)
  else:
    _ConvertFieldValuePair(value, message)
def _ConvertAnyMessage(value, message):
  """Convert a JSON representation into Any message.

  Args:
    value: A JSON dict; must carry an '@type' key naming the packed type.
    message: An Any protocol message to populate.

  Raises:
    ParseError: If '@type' is missing.
  """
  if isinstance(value, dict) and not value:
    # Empty object -> leave the Any unset.
    return
  try:
    type_url = value['@type']
  except KeyError:
    raise ParseError('@type is missing when parsing any message.')
  sub_message = _CreateMessageFromTypeUrl(type_url)
  message_descriptor = sub_message.DESCRIPTOR
  full_name = message_descriptor.full_name
  if _IsWrapperMessage(message_descriptor):
    _ConvertWrapperMessage(value['value'], sub_message)
  elif full_name in _WKTJSONMETHODS:
    _WKTJSONMETHODS[full_name][1](value['value'], sub_message)
  else:
    # NOTE(review): this mutates the caller's dict in place by removing
    # '@type' — confirm no caller reuses the dict afterwards.
    del value['@type']
    _ConvertFieldValuePair(value, sub_message)
  # Sets Any message
  message.value = sub_message.SerializeToString()
  message.type_url = type_url
def _ConvertGenericMessage(value, message):
  """Convert a JSON representation into message with FromJsonString."""
  # Duration, Timestamp, FieldMask have FromJsonString method to do the
  # convert. Users can also call the method directly.
  message.FromJsonString(value)
# Numeric JSON types accepted for Value.number_value. bool is checked
# before these in _ConvertValueMessage since bool subclasses int.
_INT_OR_FLOAT = six.integer_types + (float,)
def _ConvertValueMessage(value, message):
  """Convert a JSON representation into Value message."""
  if isinstance(value, dict):
    _ConvertStructMessage(value, message.struct_value)
  elif isinstance(value, list):
    _ConvertListValueMessage(value, message.list_value)
  elif value is None:
    message.null_value = 0
  elif isinstance(value, bool):
    # bool must be tested before _INT_OR_FLOAT: bool is a subclass of int.
    message.bool_value = value
  elif isinstance(value, six.string_types):
    message.string_value = value
  elif isinstance(value, _INT_OR_FLOAT):
    message.number_value = value
  else:
    raise ParseError('Unexpected type for Value message.')
def _ConvertListValueMessage(value, message):
  """Convert a JSON representation into ListValue message."""
  if not isinstance(value, list):
    raise ParseError(
        'ListValue must be in [] which is {0}.'.format(value))
  # Replace any existing contents before appending the new elements.
  message.ClearField('values')
  for element in value:
    _ConvertValueMessage(element, message.values.add())
def _ConvertStructMessage(value, message):
  """Convert a JSON representation into Struct message.

  Args:
    value: A dict mapping field names to JSON values.
    message: A Struct protocol message to populate.

  Raises:
    ParseError: If *value* is not a dict.
  """
  if not isinstance(value, dict):
    raise ParseError(
        'Struct must be in a dict which is {0}.'.format(value))
  for key in value:
    _ConvertValueMessage(value[key], message.fields[key])
  # Removed the redundant bare `return` that ended the original function.
def _ConvertWrapperMessage(value, message):
  """Convert a JSON representation into Wrapper message.

  Args:
    value: The JSON scalar for the wrapper's single 'value' field.
    message: A wrapper protocol message (e.g. Int32Value) to populate.
  """
  field = message.DESCRIPTOR.fields_by_name['value']
  # Direct assignment instead of setattr() with a constant attribute name.
  message.value = _ConvertScalarFieldValue(value, field)
def _ConvertMapFieldValue(value, message, field):
  """Convert map field value for a message map field.

  Args:
    value: A JSON object to convert the map field value.
    message: A protocol message to record the converted data.
    field: The descriptor of the map field to be converted.

  Raises:
    ParseError: In case of convert problems.
  """
  if not isinstance(value, dict):
    raise ParseError(
        'Map field {0} must be in a dict which is {1}.'.format(
            field.name, value))
  key_field = field.message_type.fields_by_name['key']
  value_field = field.message_type.fields_by_name['value']
  # Hoist the loop invariants: the map container and the kind of its values.
  map_entries = getattr(message, field.name)
  value_is_message = (
      value_field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE)
  for json_key, json_value in value.items():
    # Map keys arrive as JSON strings; require_str=True enforces that.
    parsed_key = _ConvertScalarFieldValue(json_key, key_field, True)
    if value_is_message:
      _ConvertMessage(json_value, map_entries[parsed_key])
    else:
      map_entries[parsed_key] = _ConvertScalarFieldValue(
          json_value, value_field)
def _ConvertScalarFieldValue(value, field, require_str=False):
  """Convert a single scalar field value.

  Args:
    value: A scalar value to convert the scalar field value.
    field: The descriptor of the field to convert.
    require_str: If True, the field value must be a str.

  Returns:
    The converted scalar field value

  Raises:
    ParseError: In case of convert problems.
  """
  cpp_type = field.cpp_type
  if cpp_type in _INT_TYPES:
    return _ConvertInteger(value)
  if cpp_type in _FLOAT_TYPES:
    return _ConvertFloat(value)
  if cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
    return _ConvertBool(value, require_str)
  if cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
    # Bytes fields are base64-encoded in JSON; other strings pass through.
    if field.type == descriptor.FieldDescriptor.TYPE_BYTES:
      return base64.b64decode(value)
    return value
  if cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
    # Enum values are represented by their symbolic names in JSON.
    enum_value = field.enum_type.values_by_name.get(value, None)
    if enum_value is None:
      raise ParseError(
          'Enum value must be a string literal with double quotes. '
          'Type "{0}" has no value named {1}.'.format(
              field.enum_type.full_name, value))
    return enum_value.number
  # Any other cpp_type falls through and implicitly returns None,
  # matching the original behavior.
def _ConvertInteger(value):
  """Convert an integer.

  Args:
    value: A scalar value to convert.

  Returns:
    The integer value.

  Raises:
    ParseError: If an integer couldn't be consumed.
  """
  # Floats are rejected outright: JSON integers must be written without
  # a fractional part.
  if isinstance(value, float):
    raise ParseError('Couldn\'t parse integer: {0}.'.format(value))
  # Reject strings containing embedded spaces (int() would strip them).
  if isinstance(value, six.text_type) and ' ' in value:
    raise ParseError('Couldn\'t parse integer: "{0}".'.format(value))
  return int(value)
def _ConvertFloat(value):
"""Convert an floating point number."""
if value == 'nan':
raise ParseError('Couldn\'t parse float "nan", use "NaN" instead.')
try:
# Assume Python compatible syntax.
return float(value)
except ValueError:
# Check alternative spellings.
if value == _NEG_INFINITY:
return float('-inf')
elif value == _INFINITY:
return float('inf')
elif value == _NAN:
return float('nan')
else:
raise ParseError('Couldn\'t parse float: {0}.'.format(value))
def _ConvertBool(value, require_str):
"""Convert a boolean value.
Args:
value: A scalar value to convert.
require_str: If True, value must be a str.
Returns:
The bool parsed.
Raises:
ParseError: If a boolean value couldn't be consumed.
"""
if require_str:
if value == 'true':
return True
elif value == 'false':
return False
else:
raise ParseError('Expected "true" or "false", not {0}.'.format(value))
if not isinstance(value, bool):
raise ParseError('Expected true or false without quotes.')
return value
# Well-known protobuf types that need special JSON handling, mapped from
# their full message name to a [to-JSON, from-JSON] converter pair.
# Index 0 serializes a message to a JSON object; index 1 parses a JSON
# representation back into the message.
_WKTJSONMETHODS = {
    'google.protobuf.Any': [_AnyMessageToJsonObject,
                            _ConvertAnyMessage],
    'google.protobuf.Duration': [_GenericMessageToJsonObject,
                                 _ConvertGenericMessage],
    'google.protobuf.FieldMask': [_GenericMessageToJsonObject,
                                  _ConvertGenericMessage],
    'google.protobuf.ListValue': [_ListValueMessageToJsonObject,
                                  _ConvertListValueMessage],
    'google.protobuf.Struct': [_StructMessageToJsonObject,
                               _ConvertStructMessage],
    'google.protobuf.Timestamp': [_GenericMessageToJsonObject,
                                  _ConvertGenericMessage],
    'google.protobuf.Value': [_ValueMessageToJsonObject,
                              _ConvertValueMessage]
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.